Statistics
| Branch: | Tag: | Revision:

root / lib / bdev.py @ 63c73073

History | View | Annotate | Download (105.5 kB)

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2010, 2011, 2012 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Block device abstraction"""
23

    
24
import re
25
import time
26
import errno
27
import shlex
28
import stat
29
import pyparsing as pyp
30
import os
31
import logging
32
import math
33

    
34
from ganeti import utils
35
from ganeti import errors
36
from ganeti import constants
37
from ganeti import objects
38
from ganeti import compat
39
from ganeti import netutils
40
from ganeti import pathutils
41

    
42

    
43
# Size of reads in _CanReadDevice
44
_DEVICE_READ_SIZE = 128 * 1024
45

    
46

    
47
def _IgnoreError(fn, *args, **kwargs):
48
  """Executes the given function, ignoring BlockDeviceErrors.
49

50
  This is used in order to simplify the execution of cleanup or
51
  rollback functions.
52

53
  @rtype: boolean
54
  @return: True when fn didn't raise an exception, False otherwise
55

56
  """
57
  try:
58
    fn(*args, **kwargs)
59
    return True
60
  except errors.BlockDeviceError, err:
61
    logging.warning("Caught BlockDeviceError but ignoring: %s", str(err))
62
    return False
63

    
64

    
65
def _ThrowError(msg, *args):
  """Log an error to the node daemon and then raise an exception.

  @type msg: string
  @param msg: the text of the exception (used as a format string when
      positional arguments are given)
  @raise errors.BlockDeviceError: always, with the formatted message

  """
  text = msg % args if args else msg
  logging.error(text)
  raise errors.BlockDeviceError(text)
77

    
78

    
79
def _CheckResult(result):
80
  """Throws an error if the given result is a failed one.
81

82
  @param result: result from RunCmd
83

84
  """
85
  if result.failed:
86
    _ThrowError("Command: %s error: %s - %s", result.cmd, result.fail_reason,
87
                result.output)
88

    
89

    
90
def _CanReadDevice(path):
  """Check if we can read from the given device.

  This tries to read the first 128k of the device.

  @type path: string
  @param path: device path to probe
  @rtype: boolean

  """
  try:
    utils.ReadFile(path, size=_DEVICE_READ_SIZE)
  except EnvironmentError:
    logging.warning("Can't read from device %s", path, exc_info=True)
    return False
  return True
102

    
103

    
104
def _GetForbiddenFileStoragePaths():
  """Builds a list of path prefixes which shouldn't be used for file storage.

  @rtype: frozenset

  """
  # System directories that must never hold instance disk files
  paths = set([
    "/boot",
    "/dev",
    "/etc",
    "/home",
    "/proc",
    "/root",
    "/sys",
    ])

  # Also forbid the usual binary/library locations under /, /usr and
  # /usr/local
  for prefix in ["", "/usr", "/usr/local"]:
    for suffix in ["bin", "lib", "lib32", "lib64", "sbin"]:
      paths.add("%s/%s" % (prefix, suffix))

  return compat.UniqueFrozenset([os.path.normpath(p) for p in paths])
125

    
126

    
127
def _ComputeWrongFileStoragePaths(paths,
                                  _forbidden=_GetForbiddenFileStoragePaths()):
  """Cross-checks a list of paths for prefixes considered bad.

  Some paths, e.g. "/bin", should not be used for file storage.

  @type paths: list
  @param paths: List of paths to be checked
  @rtype: list
  @return: Sorted list of paths for which the user should be warned

  """
  def _IsWrong(path):
    # Relative paths, exact forbidden entries and anything below a
    # forbidden prefix are all rejected
    if not os.path.isabs(path):
      return True
    if path in _forbidden:
      return True
    return any(utils.IsBelowDir(p, path) for p in _forbidden)

  normalized = [os.path.normpath(p) for p in paths]
  return utils.NiceSort([p for p in normalized if _IsWrong(p)])
145

    
146

    
147
def ComputeWrongFileStoragePaths(_filename=pathutils.FILE_STORAGE_PATHS_FILE):
  """Returns a list of file storage paths whose prefix is considered bad.

  See L{_ComputeWrongFileStoragePaths}.

  @rtype: list

  """
  allowed = _LoadAllowedFileStoragePaths(_filename)
  return _ComputeWrongFileStoragePaths(allowed)
154

    
155

    
156
def _CheckFileStoragePath(path, allowed):
  """Checks if a path is in a list of allowed paths for file storage.

  @type path: string
  @param path: Path to check
  @type allowed: list
  @param allowed: List of allowed paths
  @raise errors.FileStoragePathError: If the path is not allowed

  """
  if not os.path.isabs(path):
    raise errors.FileStoragePathError("File storage path must be absolute,"
                                      " got '%s'" % path)

  matched = False
  for candidate in allowed:
    if not os.path.isabs(candidate):
      # Relative entries in the allowed list are ignored, not errors
      logging.info("Ignoring relative path '%s' for file storage", candidate)
    elif utils.IsBelowDir(candidate, path):
      matched = True
      break

  if not matched:
    raise errors.FileStoragePathError("Path '%s' is not acceptable for file"
                                      " storage" % path)
180

    
181

    
182
def _LoadAllowedFileStoragePaths(filename):
  """Loads file containing allowed file storage paths.

  @rtype: list
  @return: List of allowed paths (can be an empty list)

  """
  try:
    contents = utils.ReadFile(filename)
  except EnvironmentError:
    # A missing/unreadable file simply means nothing is allowed
    return []
  return utils.FilterEmptyLinesAndComments(contents)
195

    
196

    
197
def CheckFileStoragePath(path, _filename=pathutils.FILE_STORAGE_PATHS_FILE):
  """Checks if a path is allowed for file storage.

  @type path: string
  @param path: Path to check
  @raise errors.FileStoragePathError: If the path is not allowed

  """
  allowed = _LoadAllowedFileStoragePaths(_filename)

  # A path under a forbidden prefix is rejected even if listed as allowed
  if _ComputeWrongFileStoragePaths([path]):
    raise errors.FileStoragePathError("Path '%s' uses a forbidden prefix" %
                                      path)

  _CheckFileStoragePath(path, allowed)
212

    
213

    
214
class BlockDev(object):
  """Block device abstract class.

  A block device can be in the following states:
    - not existing on the system, and by `Create()` it goes into:
    - existing but not setup/not active, and by `Assemble()` goes into:
    - active read-write and by `Open()` it goes into
    - online (=used, or ready for use)

  A device can also be online but read-only, however we are not using
  the readonly state (LV has it, if needed in the future) and we are
  usually looking at this like at a stack, so it's easier to
  conceptualise the transition from not-existing to online and back
  like a linear one.

  The many different states of the device are due to the fact that we
  need to cover many device types:
    - logical volumes are created, lvchange -a y $lv, and used
    - drbd devices are attached to a local disk/remote peer and made primary

  A block device is identified by three items:
    - the /dev path of the device (dynamic)
    - a unique ID of the device (static)
    - its major/minor pair (dynamic)

  Not all devices implement both the first two as distinct items. LVM
  logical volumes have their unique ID (the pair volume group, logical
  volume name) in a 1-to-1 relation to the dev path. For DRBD devices,
  the /dev path is again dynamic and the unique id is the pair (host1,
  dev1), (host2, dev2).

  You can get to a device in two ways:
    - creating the (real) device, which returns you
      an attached instance (lvcreate)
    - attaching of a python instance to an existing (real) device

  The second point, the attachment to a device, is different
  depending on whether the device is assembled or not. At init() time,
  we search for a device with the same unique_id as us. If found,
  good. It also means that the device is already assembled. If not,
  after assembly we'll have our correct major/minor.

  """
  def __init__(self, unique_id, children, size, params):
    # child BlockDev instances (may be None or empty for leaf devices)
    self._children = children
    # dynamic /dev path, filled in by Attach()/Assemble()
    self.dev_path = None
    # static identifier of the device
    self.unique_id = unique_id
    # major/minor are only known once the device is attached
    self.major = None
    self.minor = None
    self.attached = False
    # size in mebibytes
    self.size = size
    # LD-level disk parameters
    self.params = params

  def Assemble(self):
    """Assemble the device from its components.

    Implementations of this method by child classes must ensure that:
      - after the device has been assembled, it knows its major/minor
        numbers; this allows other devices (usually parents) to probe
        correctly for their children
      - calling this method on an existing, in-use device is safe
      - if the device is already configured (and in an OK state),
        this method is idempotent

    """
    pass

  def Attach(self):
    """Find a device which matches our config and attach to it.

    """
    raise NotImplementedError

  def Close(self):
    """Notifies that the device will no longer be used for I/O.

    """
    raise NotImplementedError

  @classmethod
  def Create(cls, unique_id, children, size, params, excl_stor):
    """Create the device.

    If the device cannot be created, it will return None
    instead. Error messages go to the logging system.

    Note that for some devices, the unique_id is used, and for other,
    the children. The idea is that these two, taken together, are
    enough for both creation and assembly (later).

    """
    raise NotImplementedError

  def Remove(self):
    """Remove this device.

    This makes sense only for some of the device types: LV and file
    storage. Also note that if the device can't attach, the removal
    can't be completed.

    """
    raise NotImplementedError

  def Rename(self, new_id):
    """Rename this device.

    This may or may not make sense for a given device type.

    """
    raise NotImplementedError

  def Open(self, force=False):
    """Make the device ready for use.

    This makes the device ready for I/O. For now, just the DRBD
    devices need this.

    The force parameter signifies that if the device has any kind of
    --force thing, it should be used, we know what we are doing.

    """
    raise NotImplementedError

  def Shutdown(self):
    """Shut down the device, freeing its children.

    This undoes the `Assemble()` work, except for the child
    assembling; as such, the children on the device are still
    assembled after this call.

    """
    raise NotImplementedError

  def SetSyncParams(self, params):
    """Adjust the synchronization parameters of the mirror.

    In case this is not a mirroring device, this is no-op.

    @param params: dictionary of LD level disk parameters related to the
    synchronization.
    @rtype: list
    @return: a list of error messages, emitted both by the current node and by
    children. An empty list means no errors.

    """
    result = []
    if self._children:
      for child in self._children:
        result.extend(child.SetSyncParams(params))
    return result

  def PauseResumeSync(self, pause):
    """Pause/Resume the sync of the mirror.

    In case this is not a mirroring device, this is no-op.

    @param pause: Whether to pause or resume
    @rtype: boolean
    @return: True when all children succeeded

    """
    result = True
    if self._children:
      for child in self._children:
        result = result and child.PauseResumeSync(pause)
    return result

  def GetSyncStatus(self):
    """Returns the sync status of the device.

    If this device is a mirroring device, this function returns the
    status of the mirror.

    If sync_percent is None, it means the device is not syncing.

    If estimated_time is None, it means we can't estimate
    the time needed, otherwise it's the time left in seconds.

    If is_degraded is True, it means the device is missing
    redundancy. This is usually a sign that something went wrong in
    the device setup, if sync_percent is None.

    The ldisk parameter represents the degradation of the local
    data. This is only valid for some devices, the rest will always
    return False (not degraded).

    @rtype: objects.BlockDevStatus

    """
    return objects.BlockDevStatus(dev_path=self.dev_path,
                                  major=self.major,
                                  minor=self.minor,
                                  sync_percent=None,
                                  estimated_time=None,
                                  is_degraded=False,
                                  ldisk_status=constants.LDS_OKAY)

  def CombinedSyncStatus(self):
    """Calculate the mirror status recursively for our children.

    The return value is the same as for `GetSyncStatus()` except the
    minimum percent and maximum time are calculated across our
    children.

    @rtype: objects.BlockDevStatus

    """
    status = self.GetSyncStatus()

    min_percent = status.sync_percent
    max_time = status.estimated_time
    is_degraded = status.is_degraded
    ldisk_status = status.ldisk_status

    if self._children:
      for child in self._children:
        child_status = child.GetSyncStatus()

        # the smallest percentage across the tree is the overall progress
        if min_percent is None:
          min_percent = child_status.sync_percent
        elif child_status.sync_percent is not None:
          min_percent = min(min_percent, child_status.sync_percent)

        # the largest remaining time dominates
        if max_time is None:
          max_time = child_status.estimated_time
        elif child_status.estimated_time is not None:
          max_time = max(max_time, child_status.estimated_time)

        # any degraded child degrades the whole device
        is_degraded = is_degraded or child_status.is_degraded

        if ldisk_status is None:
          ldisk_status = child_status.ldisk_status
        elif child_status.ldisk_status is not None:
          ldisk_status = max(ldisk_status, child_status.ldisk_status)

    return objects.BlockDevStatus(dev_path=self.dev_path,
                                  major=self.major,
                                  minor=self.minor,
                                  sync_percent=min_percent,
                                  estimated_time=max_time,
                                  is_degraded=is_degraded,
                                  ldisk_status=ldisk_status)

  def SetInfo(self, text):
    """Update metadata with info text.

    Only supported for some device types.

    """
    # FIX: guard against _children being None, consistent with
    # SetSyncParams/PauseResumeSync above (children can legitimately be
    # None, e.g. for snapshot LVs)
    if self._children:
      for child in self._children:
        child.SetInfo(text)

  def Grow(self, amount, dryrun, backingstore):
    """Grow the block device.

    @type amount: integer
    @param amount: the amount (in mebibytes) to grow with
    @type dryrun: boolean
    @param dryrun: whether to execute the operation in simulation mode
        only, without actually increasing the size
    @param backingstore: whether to execute the operation on backing storage
        only, or on "logical" storage only; e.g. DRBD is logical storage,
        whereas LVM, file, RBD are backing storage

    """
    raise NotImplementedError

  def GetActualSize(self):
    """Return the actual disk size.

    @note: the device needs to be active when this is called
    @rtype: integer
    @return: size in bytes, as reported by "blockdev --getsize64"

    """
    assert self.attached, "BlockDevice not attached in GetActualSize()"
    result = utils.RunCmd(["blockdev", "--getsize64", self.dev_path])
    if result.failed:
      _ThrowError("blockdev failed (%s): %s",
                  result.fail_reason, result.output)
    try:
      sz = int(result.output.strip())
    # FIX: "except ... as" instead of the Python-2-only "except E, err"
    except (ValueError, TypeError) as err:
      _ThrowError("Failed to parse blockdev output: %s", str(err))
    return sz

  def __repr__(self):
    return ("<%s: unique_id: %s, children: %s, %s:%s, %s>" %
            (self.__class__, self.unique_id, self._children,
             self.major, self.minor, self.dev_path))
500

    
501

    
502
class LogicalVolume(BlockDev):
  """Logical Volume block device.

  """
  # Characters accepted in VG/LV names (see lvm(8))
  _VALID_NAME_RE = re.compile("^[a-zA-Z0-9+_.-]*$")
  # Names reserved by LVM itself
  _INVALID_NAMES = compat.UniqueFrozenset([".", "..", "snapshot", "pvmove"])
  # Substrings used by LVM for internal (mirror log/image) volumes
  _INVALID_SUBSTRINGS = compat.UniqueFrozenset(["_mlog", "_mimage"])
509

    
510
  def __init__(self, unique_id, children, size, params):
    """Attaches to a LV device.

    The unique_id is a tuple (vg_name, lv_name)

    @raise ValueError: if unique_id is not a two-element tuple/list

    """
    super(LogicalVolume, self).__init__(unique_id, children, size, params)
    if not (isinstance(unique_id, (tuple, list)) and len(unique_id) == 2):
      raise ValueError("Invalid configuration data %s" % str(unique_id))
    (self._vg_name, self._lv_name) = unique_id
    for name in (self._vg_name, self._lv_name):
      self._ValidateName(name)
    self.dev_path = utils.PathJoin("/dev", self._vg_name, self._lv_name)
    # assume degraded until Attach() proves otherwise
    self._degraded = True
    self.major = None
    self.minor = None
    self.pe_size = None
    self.stripe_count = None
    self.Attach()
526

    
527
  @staticmethod
  def _GetStdPvSize(pvs_info):
    """Return the standard PV size (used with exclusive storage).

    @param pvs_info: list of objects.LvmPvInfo, cannot be empty
    @rtype: float
    @return: size in MiB

    """
    assert pvs_info
    smallest = min(pv.size for pv in pvs_info)
    # leave room for the partitioning margin and reserved space
    return smallest / (1 + constants.PART_MARGIN + constants.PART_RESERVED)
539

    
540
  @staticmethod
  def _ComputeNumPvs(size, pvs_info):
    """Compute the number of PVs needed for an LV (with exclusive storage).

    @type size: float
    @param size: LV size
    @param pvs_info: list of objects.LvmPvInfo, cannot be empty
    @rtype: integer
    @return: number of PVs needed

    """
    assert pvs_info
    needed = float(size) / float(LogicalVolume._GetStdPvSize(pvs_info))
    return int(math.ceil(needed))
553

    
554
  @staticmethod
  def _GetEmptyPvNames(pvs_info, max_pvs=None):
    """Return a list of empty PVs, by name.

    @param pvs_info: list of objects.LvmPvInfo
    @param max_pvs: optional cap on the number of names returned
    @rtype: list

    """
    names = [pv.name for pv in pvs_info if objects.LvmPvInfo.IsEmpty(pv)]
    if max_pvs is None:
      return names
    return names[:max_pvs]
563

    
564
  @classmethod
  def Create(cls, unique_id, children, size, params, excl_stor):
    """Create a new logical volume.

    @type unique_id: tuple
    @param unique_id: (vg_name, lv_name) pair for the new volume
    @param children: ignored for LVs (kept for interface compatibility)
    @type size: integer
    @param size: size in MiB
    @param params: LD-level disk parameters (LDP_STRIPES is used)
    @type excl_stor: boolean
    @param excl_stor: whether exclusive storage is enabled (the LV is
        then placed on whole, empty PVs)
    @rtype: LogicalVolume
    @raise errors.ProgrammerError: if unique_id is malformed
    @raise errors.BlockDeviceError: (via _ThrowError) on any failure

    """
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
      raise errors.ProgrammerError("Invalid configuration data %s" %
                                   str(unique_id))
    vg_name, lv_name = unique_id
    cls._ValidateName(vg_name)
    cls._ValidateName(lv_name)
    pvs_info = cls.GetPVInfo([vg_name])
    if not pvs_info:
      if excl_stor:
        msg = "No (empty) PVs found"
      else:
        msg = "Can't compute PV info for vg %s" % vg_name
      _ThrowError(msg)
    # most-free PVs first, so lvcreate gets the best candidates
    pvs_info.sort(key=(lambda pv: pv.free), reverse=True)

    pvlist = [pv.name for pv in pvs_info]
    # ':' is the PV-list separator on the lvcreate command line, so PV
    # names containing it cannot be passed through safely
    if compat.any(":" in v for v in pvlist):
      _ThrowError("Some of your PVs have the invalid character ':' in their"
                  " name, this is not supported - please filter them out"
                  " in lvm.conf using either 'filter' or 'preferred_names'")

    current_pvs = len(pvlist)
    desired_stripes = params[constants.LDP_STRIPES]
    # cannot stripe wider than the number of PVs available
    stripes = min(current_pvs, desired_stripes)

    if excl_stor:
      err_msgs = utils.LvmExclusiveCheckNodePvs(pvs_info)
      if err_msgs:
        for m in err_msgs:
          logging.warning(m)
      # with exclusive storage only completely empty PVs may be used
      req_pvs = cls._ComputeNumPvs(size, pvs_info)
      pvlist = cls._GetEmptyPvNames(pvs_info, req_pvs)
      current_pvs = len(pvlist)
      if current_pvs < req_pvs:
        _ThrowError("Not enough empty PVs to create a disk of %d MB:"
                    " %d available, %d needed", size, current_pvs, req_pvs)
      assert current_pvs == len(pvlist)
      if stripes > current_pvs:
        # No warning issued for this, as it's no surprise
        stripes = current_pvs

    else:
      if stripes < desired_stripes:
        logging.warning("Could not use %d stripes for VG %s, as only %d PVs are"
                        " available.", desired_stripes, vg_name, current_pvs)
      free_size = sum([pv.free for pv in pvs_info])
      # The size constraint should have been checked from the master before
      # calling the create function.
      if free_size < size:
        _ThrowError("Not enough free space: required %s,"
                    " available %s", size, free_size)

    # If the free space is not well distributed, we won't be able to
    # create an optimally-striped volume; in that case, we want to try
    # with N, N-1, ..., 2, and finally 1 (non-stripped) number of
    # stripes
    cmd = ["lvcreate", "-L%dm" % size, "-n%s" % lv_name]
    for stripes_arg in range(stripes, 0, -1):
      result = utils.RunCmd(cmd + ["-i%d" % stripes_arg] + [vg_name] + pvlist)
      if not result.failed:
        break
    # NOTE(review): "result" relies on the loop running at least once;
    # earlier checks appear to guarantee stripes >= 1 here — confirm
    if result.failed:
      _ThrowError("LV create failed (%s): %s",
                  result.fail_reason, result.output)
    return LogicalVolume(unique_id, children, size, params)
634

    
635
  @staticmethod
  def _GetVolumeInfo(lvm_cmd, fields):
    """Returns LVM Volumen infos using lvm_cmd

    @param lvm_cmd: Should be one of "pvs", "vgs" or "lvs"
    @param fields: Fields to return
    @return: A list of dicts each with the parsed fields
    @raise errors.ProgrammerError: when no fields are given
    @raise errors.CommandError: when the command fails or its output
        cannot be parsed

    """
    if not fields:
      raise errors.ProgrammerError("No fields specified")

    sep = "|"
    cmd = [lvm_cmd, "--noheadings", "--nosuffix", "--units=m", "--unbuffered",
           "--separator=%s" % sep, "-o%s" % ",".join(fields)]

    result = utils.RunCmd(cmd)
    if result.failed:
      raise errors.CommandError("Can't get the volume information: %s - %s" %
                                (result.fail_reason, result.output))

    data = []
    for line in result.stdout.splitlines():
      parts = line.strip().split(sep)

      # each line must yield exactly one value per requested field
      if len(parts) != len(fields):
        raise errors.CommandError("Can't parse %s output: line '%s'" %
                                  (lvm_cmd, line))

      data.append(parts)

    return data
667

    
668
  @classmethod
  def GetPVInfo(cls, vg_names, filter_allocatable=True):
    """Get the free space info for PVs in a volume group.

    @param vg_names: list of volume group names, if empty all will be returned
    @param filter_allocatable: whether to skip over unallocatable PVs

    @rtype: list
    @return: list of objects.LvmPvInfo objects, or None when the PV
        information cannot be retrieved

    """
    try:
      info = cls._GetVolumeInfo("pvs", ["pv_name", "vg_name", "pv_free",
                                        "pv_attr", "pv_size"])
    # FIX: "except ... as" instead of the Python-2-only "except E, err"
    except errors.GenericError as err:
      logging.error("Can't get PV information: %s", err)
      return None

    data = []
    for (pv_name, vg_name, pv_free, pv_attr, pv_size) in info:
      # (possibly) skip over pvs which are not allocatable
      if filter_allocatable and pv_attr[0] != "a":
        continue
      # (possibly) skip over pvs which are not in the right volume group(s)
      if vg_names and vg_name not in vg_names:
        continue
      pvi = objects.LvmPvInfo(name=pv_name, vg_name=vg_name,
                              size=float(pv_size), free=float(pv_free),
                              attributes=pv_attr)
      data.append(pvi)

    return data
700

    
701
  @classmethod
  def GetVGInfo(cls, vg_names, filter_readonly=True):
    """Get the free space info for specific VGs.

    @param vg_names: list of volume group names, if empty all will be returned
    @param filter_readonly: whether to skip over readonly VGs

    @rtype: list
    @return: list of tuples (free_space, total_size, name) with free_space in
             MiB, or None when the VG information cannot be retrieved

    """
    try:
      info = cls._GetVolumeInfo("vgs", ["vg_name", "vg_free", "vg_attr",
                                        "vg_size"])
    # FIX: "except ... as" instead of the Python-2-only "except E, err"
    except errors.GenericError as err:
      logging.error("Can't get VG information: %s", err)
      return None

    data = []
    for vg_name, vg_free, vg_attr, vg_size in info:
      # (possibly) skip over vgs which are not writable
      if filter_readonly and vg_attr[0] == "r":
        continue
      # (possibly) skip over vgs which are not in the right volume group(s)
      if vg_names and vg_name not in vg_names:
        continue
      data.append((float(vg_free), float(vg_size), vg_name))

    return data
731

    
732
  @classmethod
  def _ValidateName(cls, name):
    """Validates that a given name is valid as VG or LV name.

    The list of valid characters and restricted names is taken out of
    the lvm(8) manpage, with the simplification that we enforce both
    VG and LV restrictions on the names.

    @raise errors.BlockDeviceError: (via _ThrowError) on invalid names

    """
    valid = (cls._VALID_NAME_RE.match(name) and
             name not in cls._INVALID_NAMES and
             not compat.any(substring in name
                            for substring in cls._INVALID_SUBSTRINGS))
    if not valid:
      _ThrowError("Invalid LVM name '%s'", name)
745

    
746
  def Remove(self):
    """Remove this logical volume.

    """
    if not self.minor and not self.Attach():
      # the LV does not exist
      return
    lv_spec = "%s/%s" % (self._vg_name, self._lv_name)
    result = utils.RunCmd(["lvremove", "-f", lv_spec])
    if result.failed:
      _ThrowError("Can't lvremove: %s - %s", result.fail_reason, result.output)
757

    
758
  def Rename(self, new_id):
    """Rename this logical volume.

    @type new_id: tuple
    @param new_id: the new (vg_name, lv_name); the volume group must
        stay the same
    @raise errors.ProgrammerError: if new_id is malformed or names a
        different volume group

    """
    if not isinstance(new_id, (tuple, list)) or len(new_id) != 2:
      raise errors.ProgrammerError("Invalid new logical id '%s'" % new_id)
    new_vg, new_name = new_id
    if new_vg != self._vg_name:
      # FIX: error message previously contained a duplicated word
      # ("from %s to to %s")
      raise errors.ProgrammerError("Can't move a logical volume across"
                                   " volume groups (from %s to %s)" %
                                   (self._vg_name, new_vg))
    result = utils.RunCmd(["lvrename", new_vg, self._lv_name, new_name])
    if result.failed:
      _ThrowError("Failed to rename the logical volume: %s", result.output)
    self._lv_name = new_name
    self.dev_path = utils.PathJoin("/dev", self._vg_name, self._lv_name)
774

    
775
  def Attach(self):
    """Attach to an existing LV.

    This method will try to see if an existing and active LV exists
    which matches our name. If so, its major/minor will be
    recorded.

    @rtype: boolean
    @return: True when the LV was found and its parameters recorded

    """
    self.attached = False
    result = utils.RunCmd(["lvs", "--noheadings", "--separator=,",
                           "--units=m", "--nosuffix",
                           "-olv_attr,lv_kernel_major,lv_kernel_minor,"
                           "vg_extent_size,stripes", self.dev_path])
    if result.failed:
      logging.error("Can't find LV %s: %s, %s",
                    self.dev_path, result.fail_reason, result.output)
      return False
    # the output can (and will) have multiple lines for multi-segment
    # LVs, as the 'stripes' parameter is a segment one, so we take
    # only the last entry, which is the one we're interested in; note
    # that with LVM2 anyway the 'stripes' value must be constant
    # across segments, so this is a no-op actually
    out = result.stdout.splitlines()
    if not out: # totally empty result? splitlines() returns at least
                # one line for any non-empty string
      logging.error("Can't parse LVS output, no lines? Got '%s'", str(out))
      return False
    out = out[-1].strip().rstrip(",")
    out = out.split(",")
    if len(out) != 5:
      logging.error("Can't parse LVS output, len(%s) != 5", str(out))
      return False

    status, major, minor, pe_size, stripes = out
    if len(status) < 6:
      logging.error("lvs lv_attr is not at least 6 characters (%s)", status)
      return False

    try:
      major = int(major)
      minor = int(minor)
    # FIX: "except ... as" instead of the Python-2-only "except E, err"
    except (TypeError, ValueError) as err:
      # NOTE(review): unlike the other parse failures below, this one
      # only logs and keeps going (major/minor stay unparsed) — confirm
      # whether this is intentional
      logging.error("lvs major/minor cannot be parsed: %s", str(err))

    try:
      pe_size = int(float(pe_size))
    # FIX: "except ... as" instead of the Python-2-only "except E, err"
    except (TypeError, ValueError) as err:
      logging.error("Can't parse vg extent size: %s", err)
      return False

    try:
      stripes = int(stripes)
    # FIX: "except ... as" instead of the Python-2-only "except E, err"
    except (TypeError, ValueError) as err:
      logging.error("Can't parse the number of stripes: %s", err)
      return False

    self.major = major
    self.minor = minor
    self.pe_size = pe_size
    self.stripe_count = stripes
    self._degraded = status[0] == "v" # virtual volume, i.e. doesn't have
                                      # backing storage
    self.attached = True
    return True
839

    
840
  def Assemble(self):
    """Assemble the device.

    We always run `lvchange -ay` on the LV to ensure it's active before
    use, as there were cases when xenvg was not active after boot
    (also possibly after disk issues).

    """
    result = utils.RunCmd(["lvchange", "-ay", self.dev_path])
    if not result.failed:
      return
    _ThrowError("Can't activate lv %s: %s", self.dev_path, result.output)
851

    
852
  def Shutdown(self):
    """Shutdown the device.

    Intentionally a no-op: logical volumes are not deactivated on
    shutdown.

    """
    pass
860

    
861
  def GetSyncStatus(self):
    """Returns the sync status of the device.

    If this device is a mirroring device, this function returns the
    status of the mirror.

    For logical volumes, sync_percent and estimated_time are always
    None (no recovery in progress, as we don't handle the mirrored LV
    case). The is_degraded parameter is the inverse of the ldisk
    parameter.

    For the ldisk parameter, we check if the logical volume has the
    'virtual' type, which means it's not backed by existing storage
    anymore (read from it return I/O error). This happens after a
    physical disk failure and subsequent 'vgreduce --removemissing' on
    the volume group.

    The status was already read in Attach, so we just return it.

    @rtype: objects.BlockDevStatus

    """
    ldisk_status = (constants.LDS_FAULTY if self._degraded
                    else constants.LDS_OKAY)

    return objects.BlockDevStatus(dev_path=self.dev_path,
                                  major=self.major,
                                  minor=self.minor,
                                  sync_percent=None,
                                  estimated_time=None,
                                  is_degraded=self._degraded,
                                  ldisk_status=ldisk_status)
895

    
896
  def Open(self, force=False):
    """Make the device ready for I/O.

    Logical volumes need no preparation before I/O, so this is a
    no-op for the LV device type.

    """
  def Close(self):
    """Notifies that the device will no longer be used for I/O.

    Logical volumes need no teardown after I/O, so this is a no-op
    for the LV device type.

    """
  def Snapshot(self, size):
    """Create a snapshot copy of an lvm block device.

    @param size: requested snapshot size, in MiB

    @returns: tuple (vg, lv)

    """
    snap_name = self._lv_name + ".snap"

    # clean up any stale snapshot with the same name before creating
    # the new one; failure to remove is deliberately ignored
    stale = LogicalVolume((self._vg_name, snap_name), None, size, self.params)
    _IgnoreError(stale.Remove)

    # make sure the volume group has room for the snapshot
    vg_info = self.GetVGInfo([self._vg_name])
    if not vg_info:
      _ThrowError("Can't compute VG info for vg %s", self._vg_name)
    free_size, _, _ = vg_info[0]
    if free_size < size:
      _ThrowError("Not enough free space: required %s,"
                  " available %s", size, free_size)

    _CheckResult(utils.RunCmd(["lvcreate", "-L%dm" % size, "-s",
                               "-n%s" % snap_name, self.dev_path]))

    return (self._vg_name, snap_name)
  def _RemoveOldInfo(self):
    """Try to remove old tags from the lv.

    Queries the current tag list via `lvs` and deletes each tag with
    `lvchange --deltag`.

    """
    lvs_result = utils.RunCmd(["lvs", "-o", "tags", "--noheadings",
                               "--nosuffix", self.dev_path])
    _CheckResult(lvs_result)

    tag_list = lvs_result.stdout.strip()
    if tag_list:
      # lvs reports tags as a single comma-separated field
      for old_tag in tag_list.split(","):
        _CheckResult(utils.RunCmd(["lvchange", "--deltag",
                                   old_tag.strip(), self.dev_path]))
  def SetInfo(self, text):
    """Update metadata with info text.

    The text is stored as an LVM tag on the volume, after removing any
    previously stored tags and sanitizing the text to the character
    set and length LVM accepts.

    """
    BlockDev.SetInfo(self, text)

    # drop whatever tag was stored previously
    self._RemoveOldInfo()

    # Replace invalid characters: the first character has a stricter
    # character class (no leading "-"), the rest allow "-" as well
    text = re.sub("^[^A-Za-z0-9_+.]", "_", text)
    text = re.sub("[^-A-Za-z0-9_+.]", "_", text)

    # Only up to 128 characters are allowed
    text = text[:128]

    _CheckResult(utils.RunCmd(["lvchange", "--addtag", text, self.dev_path]))
  def Grow(self, amount, dryrun, backingstore):
    """Grow the logical volume.

    @param amount: growth amount in MiB (rounded up to a multiple of
        the full stripe size)
    @param dryrun: if True, pass --test to lvextend so nothing changes
    @param backingstore: LVs are backing storage; when False there is
        nothing to do here

    """
    if not backingstore:
      return
    # pe_size/stripe_count are filled in by Attach; re-attach if needed
    if self.pe_size is None or self.stripe_count is None:
      if not self.Attach():
        _ThrowError("Can't attach to LV during Grow()")
    full_stripe_size = self.pe_size * self.stripe_count
    # round the amount up to a whole number of stripes
    remainder = amount % full_stripe_size
    if remainder:
      amount += full_stripe_size - remainder
    cmd = ["lvextend", "-L", "+%dm" % amount]
    if dryrun:
      cmd.append("--test")
    # we try multiple algorithms since the 'best' ones might not have
    # space available in the right place, but later ones might (since
    # they have less constraints); also note that only recent LVM
    # supports 'cling'
    for alloc_policy in "contiguous", "cling", "normal":
      run_result = utils.RunCmd(cmd + ["--alloc", alloc_policy,
                                       self.dev_path])
      if not run_result.failed:
        return
    # run_result holds the outcome of the last (least constrained) try
    _ThrowError("Can't grow LV %s: %s", self.dev_path, run_result.output)
class DRBD8Status(object):
  """A DRBD status representation class.

  Note that this doesn't support unconfigured devices (cs:Unconfigured).

  """
  UNCONF_RE = re.compile(r"\s*[0-9]+:\s*cs:Unconfigured$")
  LINE_RE = re.compile(r"\s*[0-9]+:\s*cs:(\S+)\s+(?:st|ro):([^/]+)/(\S+)"
                       "\s+ds:([^/]+)/(\S+)\s+.*$")
  SYNC_RE = re.compile(r"^.*\ssync'ed:\s*([0-9.]+)%.*"
                       # Due to a bug in drbd in the kernel, introduced in
                       # commit 4b0715f096 (still unfixed as of 2011-08-22)
                       "(?:\s|M)"
                       "finish: ([0-9]+):([0-9]+):([0-9]+)\s.*$")

  # connection states
  CS_UNCONFIGURED = "Unconfigured"
  CS_STANDALONE = "StandAlone"
  CS_WFCONNECTION = "WFConnection"
  CS_WFREPORTPARAMS = "WFReportParams"
  CS_CONNECTED = "Connected"
  CS_STARTINGSYNCS = "StartingSyncS"
  CS_STARTINGSYNCT = "StartingSyncT"
  CS_WFBITMAPS = "WFBitMapS"
  CS_WFBITMAPT = "WFBitMapT"
  CS_WFSYNCUUID = "WFSyncUUID"
  CS_SYNCSOURCE = "SyncSource"
  CS_SYNCTARGET = "SyncTarget"
  CS_PAUSEDSYNCS = "PausedSyncS"
  CS_PAUSEDSYNCT = "PausedSyncT"
  # connection states that count as "a resync is in progress"
  CSET_SYNC = compat.UniqueFrozenset([
    CS_WFREPORTPARAMS,
    CS_STARTINGSYNCS,
    CS_STARTINGSYNCT,
    CS_WFBITMAPS,
    CS_WFBITMAPT,
    CS_WFSYNCUUID,
    CS_SYNCSOURCE,
    CS_SYNCTARGET,
    CS_PAUSEDSYNCS,
    CS_PAUSEDSYNCT,
    ])

  # disk states
  DS_DISKLESS = "Diskless"
  DS_ATTACHING = "Attaching" # transient state
  DS_FAILED = "Failed" # transient state, next: diskless
  DS_NEGOTIATING = "Negotiating" # transient state
  DS_INCONSISTENT = "Inconsistent" # while syncing or after creation
  DS_OUTDATED = "Outdated"
  DS_DUNKNOWN = "DUnknown" # shown for peer disk when not connected
  DS_CONSISTENT = "Consistent"
  DS_UPTODATE = "UpToDate" # normal state

  # roles
  RO_PRIMARY = "Primary"
  RO_SECONDARY = "Secondary"
  RO_UNKNOWN = "Unknown"

  def __init__(self, procline):
    """Parse one (already joined) /proc/drbd device line.

    @raise errors.BlockDeviceError: if the line matches neither the
        unconfigured nor the configured-device pattern

    """
    unconf_match = self.UNCONF_RE.match(procline)
    if unconf_match:
      self.cstatus = self.CS_UNCONFIGURED
      self.lrole = self.rrole = self.ldisk = self.rdisk = None
    else:
      line_match = self.LINE_RE.match(procline)
      if not line_match:
        raise errors.BlockDeviceError("Can't parse input data '%s'" % procline)
      (self.cstatus, self.lrole, self.rrole,
       self.ldisk, self.rdisk) = line_match.groups()

    # end reading of data from the LINE_RE or UNCONF_RE

    # derived connection-state booleans
    self.is_standalone = self.cstatus == self.CS_STANDALONE
    self.is_wfconn = self.cstatus == self.CS_WFCONNECTION
    self.is_connected = self.cstatus == self.CS_CONNECTED
    # derived role booleans, local and peer
    self.is_primary = self.lrole == self.RO_PRIMARY
    self.is_secondary = self.lrole == self.RO_SECONDARY
    self.peer_primary = self.rrole == self.RO_PRIMARY
    self.peer_secondary = self.rrole == self.RO_SECONDARY
    self.both_primary = self.is_primary and self.peer_primary
    self.both_secondary = self.is_secondary and self.peer_secondary

    # derived local-disk booleans
    self.is_diskless = self.ldisk == self.DS_DISKLESS
    self.is_disk_uptodate = self.ldisk == self.DS_UPTODATE

    self.is_in_resync = self.cstatus in self.CSET_SYNC
    self.is_in_use = self.cstatus != self.CS_UNCONFIGURED

    sync_match = self.SYNC_RE.match(procline)
    if sync_match:
      self.sync_percent = float(sync_match.group(1))
      hours, minutes, seconds = [int(g) for g in sync_match.group(2, 3, 4)]
      self.est_time = hours * 3600 + minutes * 60 + seconds
    else:
      # we have (in this if branch) no percent information, but if
      # we're resyncing we need to 'fake' a sync percent information,
      # as this is how cmdlib determines if it makes sense to wait for
      # resyncing or not
      self.sync_percent = 0 if self.is_in_resync else None
      self.est_time = None
class BaseDRBD(BlockDev): # pylint: disable=W0223
1104
  """Base DRBD class.
1105

1106
  This class contains a few bits of common functionality between the
1107
  0.7 and 8.x versions of DRBD.
1108

1109
  """
1110
  _VERSION_RE = re.compile(r"^version: (\d+)\.(\d+)\.(\d+)(?:\.\d+)?"
1111
                           r" \(api:(\d+)/proto:(\d+)(?:-(\d+))?\)")
1112
  _VALID_LINE_RE = re.compile("^ *([0-9]+): cs:([^ ]+).*$")
1113
  _UNUSED_LINE_RE = re.compile("^ *([0-9]+): cs:Unconfigured$")
1114

    
1115
  _DRBD_MAJOR = 147
1116
  _ST_UNCONFIGURED = "Unconfigured"
1117
  _ST_WFCONNECTION = "WFConnection"
1118
  _ST_CONNECTED = "Connected"
1119

    
1120
  _STATUS_FILE = constants.DRBD_STATUS_FILE
1121
  _USERMODE_HELPER_FILE = "/sys/module/drbd/parameters/usermode_helper"
1122

    
1123
  @staticmethod
1124
  def _GetProcData(filename=_STATUS_FILE):
1125
    """Return data from /proc/drbd.
1126

1127
    """
1128
    try:
1129
      data = utils.ReadFile(filename).splitlines()
1130
    except EnvironmentError, err:
1131
      if err.errno == errno.ENOENT:
1132
        _ThrowError("The file %s cannot be opened, check if the module"
1133
                    " is loaded (%s)", filename, str(err))
1134
      else:
1135
        _ThrowError("Can't read the DRBD proc file %s: %s", filename, str(err))
1136
    if not data:
1137
      _ThrowError("Can't read any data from %s", filename)
1138
    return data
1139

    
1140
  @classmethod
1141
  def _MassageProcData(cls, data):
1142
    """Transform the output of _GetProdData into a nicer form.
1143

1144
    @return: a dictionary of minor: joined lines from /proc/drbd
1145
        for that minor
1146

1147
    """
1148
    results = {}
1149
    old_minor = old_line = None
1150
    for line in data:
1151
      if not line: # completely empty lines, as can be returned by drbd8.0+
1152
        continue
1153
      lresult = cls._VALID_LINE_RE.match(line)
1154
      if lresult is not None:
1155
        if old_minor is not None:
1156
          results[old_minor] = old_line
1157
        old_minor = int(lresult.group(1))
1158
        old_line = line
1159
      else:
1160
        if old_minor is not None:
1161
          old_line += " " + line.strip()
1162
    # add last line
1163
    if old_minor is not None:
1164
      results[old_minor] = old_line
1165
    return results
1166

    
1167
  @classmethod
1168
  def _GetVersion(cls, proc_data):
1169
    """Return the DRBD version.
1170

1171
    This will return a dict with keys:
1172
      - k_major
1173
      - k_minor
1174
      - k_point
1175
      - api
1176
      - proto
1177
      - proto2 (only on drbd > 8.2.X)
1178

1179
    """
1180
    first_line = proc_data[0].strip()
1181
    version = cls._VERSION_RE.match(first_line)
1182
    if not version:
1183
      raise errors.BlockDeviceError("Can't parse DRBD version from '%s'" %
1184
                                    first_line)
1185

    
1186
    values = version.groups()
1187
    retval = {
1188
      "k_major": int(values[0]),
1189
      "k_minor": int(values[1]),
1190
      "k_point": int(values[2]),
1191
      "api": int(values[3]),
1192
      "proto": int(values[4]),
1193
      }
1194
    if values[5] is not None:
1195
      retval["proto2"] = values[5]
1196

    
1197
    return retval
1198

    
1199
  @staticmethod
1200
  def GetUsermodeHelper(filename=_USERMODE_HELPER_FILE):
1201
    """Returns DRBD usermode_helper currently set.
1202

1203
    """
1204
    try:
1205
      helper = utils.ReadFile(filename).splitlines()[0]
1206
    except EnvironmentError, err:
1207
      if err.errno == errno.ENOENT:
1208
        _ThrowError("The file %s cannot be opened, check if the module"
1209
                    " is loaded (%s)", filename, str(err))
1210
      else:
1211
        _ThrowError("Can't read DRBD helper file %s: %s", filename, str(err))
1212
    if not helper:
1213
      _ThrowError("Can't read any data from %s", filename)
1214
    return helper
1215

    
1216
  @staticmethod
1217
  def _DevPath(minor):
1218
    """Return the path to a drbd device for a given minor.
1219

1220
    """
1221
    return "/dev/drbd%d" % minor
1222

    
1223
  @classmethod
1224
  def GetUsedDevs(cls):
1225
    """Compute the list of used DRBD devices.
1226

1227
    """
1228
    data = cls._GetProcData()
1229

    
1230
    used_devs = {}
1231
    for line in data:
1232
      match = cls._VALID_LINE_RE.match(line)
1233
      if not match:
1234
        continue
1235
      minor = int(match.group(1))
1236
      state = match.group(2)
1237
      if state == cls._ST_UNCONFIGURED:
1238
        continue
1239
      used_devs[minor] = state, line
1240

    
1241
    return used_devs
1242

    
1243
  def _SetFromMinor(self, minor):
1244
    """Set our parameters based on the given minor.
1245

1246
    This sets our minor variable and our dev_path.
1247

1248
    """
1249
    if minor is None:
1250
      self.minor = self.dev_path = None
1251
      self.attached = False
1252
    else:
1253
      self.minor = minor
1254
      self.dev_path = self._DevPath(minor)
1255
      self.attached = True
1256

    
1257
  @staticmethod
1258
  def _CheckMetaSize(meta_device):
1259
    """Check if the given meta device looks like a valid one.
1260

1261
    This currently only checks the size, which must be around
1262
    128MiB.
1263

1264
    """
1265
    result = utils.RunCmd(["blockdev", "--getsize", meta_device])
1266
    if result.failed:
1267
      _ThrowError("Failed to get device size: %s - %s",
1268
                  result.fail_reason, result.output)
1269
    try:
1270
      sectors = int(result.stdout)
1271
    except (TypeError, ValueError):
1272
      _ThrowError("Invalid output from blockdev: '%s'", result.stdout)
1273
    num_bytes = sectors * 512
1274
    if num_bytes < 128 * 1024 * 1024: # less than 128MiB
1275
      _ThrowError("Meta device too small (%.2fMib)", (num_bytes / 1024 / 1024))
1276
    # the maximum *valid* size of the meta device when living on top
1277
    # of LVM is hard to compute: it depends on the number of stripes
1278
    # and the PE size; e.g. a 2-stripe, 64MB PE will result in a 128MB
1279
    # (normal size), but an eight-stripe 128MB PE will result in a 1GB
1280
    # size meta device; as such, we restrict it to 1GB (a little bit
1281
    # too generous, but making assumptions about PE size is hard)
1282
    if num_bytes > 1024 * 1024 * 1024:
1283
      _ThrowError("Meta device too big (%.2fMiB)", (num_bytes / 1024 / 1024))
1284

    
1285
  def Rename(self, new_id):
1286
    """Rename a device.
1287

1288
    This is not supported for drbd devices.
1289

1290
    """
1291
    raise errors.ProgrammerError("Can't rename a drbd device")
1292

    
1293

    
1294
class DRBD8(BaseDRBD):
1295
  """DRBD v8.x block device.
1296

1297
  This implements the local host part of the DRBD device, i.e. it
1298
  doesn't do anything to the supposed peer. If you need a fully
1299
  connected DRBD pair, you need to use this class on both hosts.
1300

1301
  The unique_id for the drbd device is a (local_ip, local_port,
1302
  remote_ip, remote_port, local_minor, secret) tuple, and it must have
1303
  two children: the data device and the meta_device. The meta device
1304
  is checked for valid size and is zeroed on create.
1305

1306
  """
1307
  _MAX_MINORS = 255
1308
  _PARSE_SHOW = None
1309

    
1310
  # timeout constants
1311
  _NET_RECONFIG_TIMEOUT = 60
1312

    
1313
  # command line options for barriers
1314
  _DISABLE_DISK_OPTION = "--no-disk-barrier"  # -a
1315
  _DISABLE_DRAIN_OPTION = "--no-disk-drain"   # -D
1316
  _DISABLE_FLUSH_OPTION = "--no-disk-flushes" # -i
1317
  _DISABLE_META_FLUSH_OPTION = "--no-md-flushes"  # -m
1318

    
1319
  def __init__(self, unique_id, children, size, params):
    """Initialize a DRBD8 device.

    @type unique_id: tuple or list
    @param unique_id: (local_ip, local_port, remote_ip, remote_port,
        local_minor, secret)
    @param children: either empty or a pair of (data, meta) devices
    @param size: device size in MiB
    @param params: disk parameters dictionary

    """
    # a children list containing None means the backing devices are
    # not there; treat that the same as having no children at all
    if children and children.count(None) > 0:
      children = []
    if len(children) not in (0, 2):
      raise ValueError("Invalid configuration data %s" % str(children))
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 6:
      raise ValueError("Invalid configuration data %s" % str(unique_id))
    (self._lhost, self._lport,
     self._rhost, self._rport,
     self._aminor, self._secret) = unique_id
    if children:
      # drop unreadable meta devices so assembly can still proceed
      if not _CanReadDevice(children[1].dev_path):
        logging.info("drbd%s: Ignoring unreadable meta device", self._aminor)
        children = []
    super(DRBD8, self).__init__(unique_id, children, size, params)
    self.major = self._DRBD_MAJOR
    # this driver only supports the 8.x kernel module
    version = self._GetVersion(self._GetProcData())
    if version["k_major"] != 8:
      _ThrowError("Mismatch in DRBD kernel version and requested ganeti"
                  " usage: kernel is %s.%s, ganeti wants 8.x",
                  version["k_major"], version["k_minor"])

    # a device cannot be connected to itself
    if (self._lhost is not None and self._lhost == self._rhost and
        self._lport == self._rport):
      raise ValueError("Invalid configuration data, same local/remote %s" %
                       (unique_id,))
    self.Attach()
  @classmethod
  def _InitMeta(cls, minor, dev_path):
    """Initialize a meta device.

    This will not work if the given minor is in use.

    """
    # Zero the metadata first, in order to make sure drbdmeta doesn't
    # try to auto-detect existing filesystems or similar (see
    # http://code.google.com/p/ganeti/issues/detail?id=182); we only
    # care about the first 128MB of data in the device, even though it
    # can be bigger
    wipe = utils.RunCmd([constants.DD_CMD,
                         "if=/dev/zero", "of=%s" % dev_path,
                         "bs=1048576", "count=128", "oflag=direct"])
    if wipe.failed:
      _ThrowError("Can't wipe the meta device: %s", wipe.output)

    # now let drbdmeta write the actual metadata structures
    create = utils.RunCmd(["drbdmeta", "--force", cls._DevPath(minor),
                           "v08", dev_path, "0", "create-md"])
    if create.failed:
      _ThrowError("Can't initialize meta device: %s", create.output)
  @classmethod
  def _FindUnusedMinor(cls):
    """Find an unused DRBD device.

    This is specific to 8.x as the minors are allocated dynamically,
    so non-existing numbers up to a max minor count are actually free.

    """
    proc_lines = cls._GetProcData()

    highest = None
    for line in proc_lines:
      unused_match = cls._UNUSED_LINE_RE.match(line)
      if unused_match:
        # an explicitly unconfigured minor can be reused directly
        return int(unused_match.group(1))
      valid_match = cls._VALID_LINE_RE.match(line)
      if valid_match:
        minor = int(valid_match.group(1))
        if highest is None or minor > highest:
          highest = minor
    if highest is None: # there are no minors in use at all
      return 0
    if highest >= cls._MAX_MINORS:
      logging.error("Error: no free drbd minors!")
      raise errors.BlockDeviceError("Can't find a free DRBD minor")
    return highest + 1
  @classmethod
  def _GetShowParser(cls):
    """Return a parser for `drbd show` output.

    This will either create or return an already-created parser for the
    output of the command `drbd show`; the grammar is cached in
    cls._PARSE_SHOW so it is only built once per process.

    """
    if cls._PARSE_SHOW is not None:
      return cls._PARSE_SHOW

    # pyparsing setup: punctuation tokens, all suppressed from the
    # parse results
    lbrace = pyp.Literal("{").suppress()
    rbrace = pyp.Literal("}").suppress()
    lbracket = pyp.Literal("[").suppress()
    rbracket = pyp.Literal("]").suppress()
    semi = pyp.Literal(";").suppress()
    colon = pyp.Literal(":").suppress()
    # this also converts the value to an int
    number = pyp.Word(pyp.nums).setParseAction(lambda s, l, t: int(t[0]))

    comment = pyp.Literal("#") + pyp.Optional(pyp.restOfLine)
    defa = pyp.Literal("_is_default").suppress()
    dbl_quote = pyp.Literal('"').suppress()

    keyword = pyp.Word(pyp.alphanums + "-")

    # value types
    value = pyp.Word(pyp.alphanums + "_-/.:")
    quoted = dbl_quote + pyp.CharsNotIn('"') + dbl_quote
    # addresses: "ipv4 1.2.3.4:port" or "ipv6 [addr]:port", with the
    # family keyword and the brackets optional
    ipv4_addr = (pyp.Optional(pyp.Literal("ipv4")).suppress() +
                 pyp.Word(pyp.nums + ".") + colon + number)
    ipv6_addr = (pyp.Optional(pyp.Literal("ipv6")).suppress() +
                 pyp.Optional(lbracket) + pyp.Word(pyp.hexnums + ":") +
                 pyp.Optional(rbracket) + colon + number)
    # meta device, extended syntax ("device [index]")
    meta_value = ((value ^ quoted) + lbracket + number + rbracket)
    # device name, extended syntax ("minor N")
    device_value = pyp.Literal("minor").suppress() + number

    # a statement: keyword, optional value of one of the types above,
    # optional "_is_default" marker, terminating semicolon
    stmt = (~rbrace + keyword + ~lbrace +
            pyp.Optional(ipv4_addr ^ ipv6_addr ^ value ^ quoted ^ meta_value ^
                         device_value) +
            pyp.Optional(defa) + semi +
            pyp.Optional(pyp.restOfLine).suppress())

    # an entire section: a name followed by braced statements
    section_name = pyp.Word(pyp.alphas + "_")
    section = section_name + lbrace + pyp.ZeroOrMore(pyp.Group(stmt)) + rbrace

    # the whole output: sections and bare statements, comments ignored
    bnf = pyp.ZeroOrMore(pyp.Group(section ^ stmt))
    bnf.ignore(comment)

    cls._PARSE_SHOW = bnf

    return bnf
  @classmethod
  def _GetShowData(cls, minor):
    """Return the `drbdsetup show` data for a minor.

    @return: the raw command output, or None if the command failed

    """
    show = utils.RunCmd(["drbdsetup", cls._DevPath(minor), "show"])
    if not show.failed:
      return show.stdout
    logging.error("Can't display the drbd config: %s - %s",
                  show.fail_reason, show.output)
    return None
  @classmethod
1467
  def _GetDevInfo(cls, out):
1468
    """Parse details about a given DRBD minor.
1469

1470
    This return, if available, the local backing device (as a path)
1471
    and the local and remote (ip, port) information from a string
1472
    containing the output of the `drbdsetup show` command as returned
1473
    by _GetShowData.
1474

1475
    """
1476
    data = {}
1477
    if not out:
1478
      return data
1479

    
1480
    bnf = cls._GetShowParser()
1481
    # run pyparse
1482

    
1483
    try:
1484
      results = bnf.parseString(out)
1485
    except pyp.ParseException, err:
1486
      _ThrowError("Can't parse drbdsetup show output: %s", str(err))
1487

    
1488
    # and massage the results into our desired format
1489
    for section in results:
1490
      sname = section[0]
1491
      if sname == "_this_host":
1492
        for lst in section[1:]:
1493
          if lst[0] == "disk":
1494
            data["local_dev"] = lst[1]
1495
          elif lst[0] == "meta-disk":
1496
            data["meta_dev"] = lst[1]
1497
            data["meta_index"] = lst[2]
1498
          elif lst[0] == "address":
1499
            data["local_addr"] = tuple(lst[1:])
1500
      elif sname == "_remote_host":
1501
        for lst in section[1:]:
1502
          if lst[0] == "address":
1503
            data["remote_addr"] = tuple(lst[1:])
1504
    return data
1505

    
1506
  def _MatchesLocal(self, info):
    """Test if our local config matches with an existing device.

    The parameter should be as returned from `_GetDevInfo()`. This
    method tests if our local backing device is the same as the one in
    the info parameter, in effect testing if we look like the given
    device.

    """
    if self._children:
      backend, meta = self._children
    else:
      backend = meta = None

    # the backing disk must match (or both be absent)
    if backend is not None:
      matches = ("local_dev" in info and info["local_dev"] == backend.dev_path)
    else:
      matches = "local_dev" not in info

    # same for the meta device and its index (always 0 for us)
    if meta is not None:
      matches = matches and ("meta_dev" in info and
                             info["meta_dev"] == meta.dev_path)
      matches = matches and ("meta_index" in info and
                             info["meta_index"] == 0)
    else:
      matches = matches and ("meta_dev" not in info and
                             "meta_index" not in info)
    return matches
  def _MatchesNet(self, info):
    """Test if our network config matches with an existing device.

    The parameter should be as returned from `_GetDevInfo()`. This
    method tests if our network configuration is the same as the one
    in the info parameter, in effect testing if we look like the given
    device.

    """
    # fully disconnected on both sides counts as a match
    if (self._lhost is None and "local_addr" not in info and
        self._rhost is None and "remote_addr" not in info):
      return True

    if self._lhost is None:
      return False

    if "local_addr" not in info or "remote_addr" not in info:
      return False

    # both endpoints must match exactly
    return (info["local_addr"] == (self._lhost, self._lport) and
            info["remote_addr"] == (self._rhost, self._rport))
  def _AssembleLocal(self, minor, backend, meta, size):
    """Configure the local part of a DRBD device.

    Runs `drbdsetup ... disk` with the backing and meta devices, the
    version-appropriate barrier options and any custom disk options.

    """
    args = ["drbdsetup", self._DevPath(minor), "disk",
            backend, meta, "0",
            "-e", "detach",
            "--create-device"]
    if size:
      args.extend(["-d", "%sm" % size])

    # barrier options depend on the running kernel module version
    version = self._GetVersion(self._GetProcData())
    barrier_args = self._ComputeDiskBarrierArgs(
      version["k_major"], version["k_minor"], version["k_point"],
      self.params[constants.LDP_BARRIERS],
      self.params[constants.LDP_NO_META_FLUSH])
    args.extend(barrier_args)

    # user-supplied extra disk options, if any
    if self.params[constants.LDP_DISK_CUSTOM]:
      args.extend(shlex.split(self.params[constants.LDP_DISK_CUSTOM]))

    result = utils.RunCmd(args)
    if result.failed:
      _ThrowError("drbd%d: can't attach local disk: %s", minor, result.output)
  @classmethod
  def _ComputeDiskBarrierArgs(cls, vmaj, vmin, vrel, disabled_barriers,
                              disable_meta_flush):
    """Compute the DRBD command line parameters for disk barriers

    Returns a list of the disk barrier parameters as requested via the
    disabled_barriers and disable_meta_flush arguments, and according to the
    supported ones in the DRBD version vmaj.vmin.vrel

    If the desired option is unsupported, raises errors.BlockDeviceError.

    """
    disabled_barriers_set = frozenset(disabled_barriers)
    if not disabled_barriers_set in constants.DRBD_VALID_BARRIER_OPT:
      raise errors.BlockDeviceError("%s is not a valid option set for DRBD"
                                    " barriers" % disabled_barriers)

    args = []

    # The following code assumes DRBD 8.x, with x < 4 and x != 1 (DRBD 8.1.x
    # does not exist). Note the explicit parentheses: the previous check
    # ("not vmaj == 8 and vmin in ...") bound as
    # "(not vmaj == 8) and (vmin in ...)" due to operator precedence and
    # thus never rejected 8.1/8.4+ kernels or most non-8 versions.
    if not (vmaj == 8 and vmin in (0, 2, 3)):
      raise errors.BlockDeviceError("Unsupported DRBD version: %d.%d.%d" %
                                    (vmaj, vmin, vrel))

    def _AppendOrRaise(option, min_version):
      """Helper for DRBD options"""
      if min_version is not None and vrel >= min_version:
        args.append(option)
      else:
        raise errors.BlockDeviceError("Could not use the option %s as the"
                                      " DRBD version %d.%d.%d does not support"
                                      " it." % (option, vmaj, vmin, vrel))

    # the minimum version for each feature is encoded via pairs of (minor
    # version -> x) where x is version in which support for the option was
    # introduced.
    meta_flush_supported = disk_flush_supported = {
      0: 12,
      2: 7,
      3: 0,
      }

    disk_drain_supported = {
      2: 7,
      3: 0,
      }

    disk_barriers_supported = {
      3: 0,
      }

    # meta flushes
    if disable_meta_flush:
      _AppendOrRaise(cls._DISABLE_META_FLUSH_OPTION,
                     meta_flush_supported.get(vmin, None))

    # disk flushes
    if constants.DRBD_B_DISK_FLUSH in disabled_barriers_set:
      _AppendOrRaise(cls._DISABLE_FLUSH_OPTION,
                     disk_flush_supported.get(vmin, None))

    # disk drain
    if constants.DRBD_B_DISK_DRAIN in disabled_barriers_set:
      _AppendOrRaise(cls._DISABLE_DRAIN_OPTION,
                     disk_drain_supported.get(vmin, None))

    # disk barriers
    if constants.DRBD_B_DISK_BARRIERS in disabled_barriers_set:
      _AppendOrRaise(cls._DISABLE_DISK_OPTION,
                     disk_barriers_supported.get(vmin, None))

    return args
  def _AssembleNet(self, minor, net_info, protocol,
                   dual_pri=False, hmac=None, secret=None):
    """Configure the network part of the device.

    @param minor: the drbd minor to configure
    @param net_info: (local_host, local_port, remote_host, remote_port)
    @param protocol: the DRBD replication protocol to use
    @param dual_pri: whether to allow both nodes to be primary
    @param hmac: HMAC algorithm for peer authentication, if any
    @param secret: shared secret used with the HMAC algorithm

    """
    lhost, lport, rhost, rport = net_info
    if None in net_info:
      # we don't want network connection and actually want to make
      # sure its shutdown
      self._ShutdownNet(minor)
      return

    # Workaround for a race condition. When DRBD is doing its dance to
    # establish a connection with its peer, it also sends the
    # synchronization speed over the wire. In some cases setting the
    # sync speed only after setting up both sides can race with DRBD
    # connecting, hence we set it here before telling DRBD anything
    # about its peer.
    sync_errors = self._SetMinorSyncParams(minor, self.params)
    if sync_errors:
      _ThrowError("drbd%d: can't set the synchronization parameters: %s" %
                  (minor, utils.CommaJoin(sync_errors)))

    # both endpoints must be of the same address family
    if netutils.IP6Address.IsValid(lhost):
      if not netutils.IP6Address.IsValid(rhost):
        _ThrowError("drbd%d: can't connect ip %s to ip %s" %
                    (minor, lhost, rhost))
      family = "ipv6"
    elif netutils.IP4Address.IsValid(lhost):
      if not netutils.IP4Address.IsValid(rhost):
        _ThrowError("drbd%d: can't connect ip %s to ip %s" %
                    (minor, lhost, rhost))
      family = "ipv4"
    else:
      _ThrowError("drbd%d: Invalid ip %s" % (minor, lhost))

    args = ["drbdsetup", self._DevPath(minor), "net",
            "%s:%s:%s" % (family, lhost, lport),
            "%s:%s:%s" % (family, rhost, rport), protocol,
            "-A", "discard-zero-changes",
            "-B", "consensus",
            "--create-device",
            ]
    if dual_pri:
      args.append("-m")
    if hmac and secret:
      args.extend(["-a", hmac, "-x", secret])

    # user-supplied extra network options, if any
    if self.params[constants.LDP_NET_CUSTOM]:
      args.extend(shlex.split(self.params[constants.LDP_NET_CUSTOM]))

    result = utils.RunCmd(args)
    if result.failed:
      _ThrowError("drbd%d: can't setup network: %s - %s",
                  minor, result.fail_reason, result.output)

    def _CheckNetworkConfig():
      # retry callback: raises RetryAgain until `drbdsetup show`
      # reports exactly the addresses we just configured
      info = self._GetDevInfo(self._GetShowData(minor))
      if not "local_addr" in info or not "remote_addr" in info:
        raise utils.RetryAgain()

      if (info["local_addr"] != (lhost, lport) or
          info["remote_addr"] != (rhost, rport)):
        raise utils.RetryAgain()

    # poll for up to 10s until the network config is visible
    try:
      utils.Retry(_CheckNetworkConfig, 1.0, 10.0)
    except utils.RetryTimeout:
      _ThrowError("drbd%d: timeout while configuring network", minor)
  def AddChildren(self, devices):
    """Add a disk to the DRBD device.

    Attaches the (backend, meta) pair of block devices as the local
    storage of this DRBD minor, initializing the meta device first.

    @type devices: list
    @param devices: a list of exactly two block devices: the data
        backend and the metadata device, in this order

    """
    if self.minor is None:
      # FIX: error message said "dbrd8" instead of "drbd8"
      _ThrowError("drbd%d: can't attach to drbd8 during AddChildren",
                  self._aminor)
    if len(devices) != 2:
      _ThrowError("drbd%d: need two devices for AddChildren", self.minor)
    # refuse to attach over an already-attached local disk
    info = self._GetDevInfo(self._GetShowData(self.minor))
    if "local_dev" in info:
      _ThrowError("drbd%d: already attached to a local disk", self.minor)
    backend, meta = devices
    if backend.dev_path is None or meta.dev_path is None:
      _ThrowError("drbd%d: children not ready during AddChildren", self.minor)
    backend.Open()
    meta.Open()
    # validate and (re)initialize the metadata before attaching
    self._CheckMetaSize(meta.dev_path)
    self._InitMeta(self._FindUnusedMinor(), meta.dev_path)

    self._AssembleLocal(self.minor, backend.dev_path, meta.dev_path, self.size)
    self._children = devices
  def RemoveChildren(self, devices):
    """Detach the drbd device from local storage.

    Verifies that C{devices} matches our recorded children, then
    detaches the local disk, leaving the device diskless.

    """
    if self.minor is None:
      _ThrowError("drbd%d: can't attach to drbd8 during RemoveChildren",
                  self._aminor)
    # early return if we don't actually have backing storage
    info = self._GetDevInfo(self._GetShowData(self.minor))
    if "local_dev" not in info:
      return
    if len(self._children) != 2:
      _ThrowError("drbd%d: we don't have two children: %s", self.minor,
                  self._children)
    if self._children.count(None) == 2: # we don't actually have children :)
      logging.warning("drbd%d: requested detach while detached", self.minor)
      return
    if len(devices) != 2:
      _ThrowError("drbd%d: we need two children in RemoveChildren", self.minor)
    # every requested path must match the corresponding child's path
    for idx, child in enumerate(self._children):
      req_path = devices[idx]
      if req_path != child.dev_path:
        _ThrowError("drbd%d: mismatch in local storage (%s != %s) in"
                    " RemoveChildren", self.minor, req_path, child.dev_path)

    self._ShutdownLocal(self.minor)
    self._children = []
  @classmethod
  def _SetMinorSyncParams(cls, minor, params):
    """Set the parameters of the DRBD syncer.

    This is the low-level implementation.

    @type minor: int
    @param minor: the drbd minor whose settings we change
    @type params: dict
    @param params: LD level disk parameters related to the synchronization
    @rtype: list
    @return: a list of error messages

    """
    cmd = ["drbdsetup", cls._DevPath(minor), "syncer"]
    if not params[constants.LDP_DYNAMIC_RESYNC]:
      # static resync rate only
      cmd.extend(["-r", "%d" % params[constants.LDP_RESYNC_RATE]])
    else:
      version = cls._GetVersion(cls._GetProcData())
      vmin = version["k_minor"]
      vrel = version["k_point"]

      # By definition we are using 8.x, so just check the rest of the version
      # number
      if vmin != 3 or vrel < 9:
        msg = ("The current DRBD version (8.%d.%d) does not support the "
               "dynamic resync speed controller" % (vmin, vrel))
        logging.error(msg)
        return [msg]

      if params[constants.LDP_PLAN_AHEAD] == 0:
        msg = ("A value of 0 for c-plan-ahead disables the dynamic sync speed"
               " controller at DRBD level. If you want to disable it, please"
               " set the dynamic-resync disk parameter to False.")
        logging.error(msg)
        return [msg]

      # add the c-* parameters of the dynamic controller
      cmd.extend(["--c-plan-ahead", params[constants.LDP_PLAN_AHEAD],
                  "--c-fill-target", params[constants.LDP_FILL_TARGET],
                  "--c-delay-target", params[constants.LDP_DELAY_TARGET],
                  "--c-max-rate", params[constants.LDP_MAX_RATE],
                  "--c-min-rate", params[constants.LDP_MIN_RATE],
                  ])

    cmd.append("--create-device")
    result = utils.RunCmd(cmd)
    if result.failed:
      msg = ("Can't change syncer rate: %s - %s" %
             (result.fail_reason, result.output))
      logging.error(msg)
      return [msg]

    return []
  def SetSyncParams(self, params):
    """Set the synchronization parameters of the DRBD syncer.

    @type params: dict
    @param params: LD level disk parameters related to the synchronization
    @rtype: list
    @return: a list of error messages, emitted both by the current node and by
    children. An empty list means no errors

    """
    if self.minor is None:
      err = "Not attached during SetSyncParams"
      logging.info(err)
      return [err]

    # first the children, then our own minor
    errs = super(DRBD8, self).SetSyncParams(params)
    errs.extend(self._SetMinorSyncParams(self.minor, params))
    return errs
  def PauseResumeSync(self, pause):
    """Pauses or resumes the sync of a DRBD device.

    @param pause: Whether to pause or resume
    @return: the success of the operation

    """
    if self.minor is None:
      logging.info("Not attached during PauseSync")
      return False

    children_result = super(DRBD8, self).PauseResumeSync(pause)

    cmd = "pause-sync" if pause else "resume-sync"

    result = utils.RunCmd(["drbdsetup", self.dev_path, cmd])
    if result.failed:
      logging.error("Can't %s: %s - %s", cmd,
                    result.fail_reason, result.output)
    # success only if both we and all children succeeded
    return not result.failed and children_result
  def GetProcStatus(self):
    """Return device data from /proc.

    @rtype: L{DRBD8Status}
    @return: the parsed status of our minor

    """
    if self.minor is None:
      _ThrowError("drbd%d: GetStats() called while not attached", self._aminor)
    minors = self._MassageProcData(self._GetProcData())
    if self.minor not in minors:
      _ThrowError("drbd%d: can't find myself in /proc", self.minor)
    return DRBD8Status(minors[self.minor])
  def GetSyncStatus(self):
    """Returns the sync status of the device.


    If sync_percent is None, it means all is ok
    If estimated_time is None, it means we can't estimate
    the time needed, otherwise it's the time left in seconds.


    We set the is_degraded parameter to True on two conditions:
    network not connected or local disk missing.

    We compute the ldisk parameter based on whether we have a local
    disk or not.

    @rtype: objects.BlockDevStatus

    """
    if self.minor is None and not self.Attach():
      _ThrowError("drbd%d: can't Attach() in GetSyncStatus", self._aminor)

    stats = self.GetProcStatus()
    # degraded unless both the network and the local disk are healthy
    is_degraded = not (stats.is_connected and stats.is_disk_uptodate)

    if stats.is_disk_uptodate:
      ldisk_status = constants.LDS_OKAY
    elif stats.is_diskless:
      ldisk_status = constants.LDS_FAULTY
    else:
      ldisk_status = constants.LDS_UNKNOWN

    return objects.BlockDevStatus(dev_path=self.dev_path,
                                  major=self.major,
                                  minor=self.minor,
                                  sync_percent=stats.sync_percent,
                                  estimated_time=stats.est_time,
                                  is_degraded=is_degraded,
                                  ldisk_status=ldisk_status)
  def Open(self, force=False):
    """Make the local state primary.

    If the 'force' parameter is given, the '-o' option is passed to
    drbdsetup. Since this is a potentially dangerous operation, the
    force flag should be only given after creation, when it actually
    is mandatory.

    """
    if self.minor is None and not self.Attach():
      logging.error("DRBD cannot attach to a device during open")
      return False
    args = ["drbdsetup", self.dev_path, "primary"]
    if force:
      args.append("-o")
    result = utils.RunCmd(args)
    if result.failed:
      _ThrowError("drbd%d: can't make drbd device primary: %s", self.minor,
                  result.output)
  def Close(self):
    """Make the local state secondary.

    This will, of course, fail if the device is in use.

    """
    if self.minor is None and not self.Attach():
      _ThrowError("drbd%d: can't Attach() in Close()", self._aminor)
    result = utils.RunCmd(["drbdsetup", self.dev_path, "secondary"])
    if not result.failed:
      return
    _ThrowError("drbd%d: can't switch drbd device to secondary: %s",
                self.minor, result.output)
  def DisconnectNet(self):
    """Removes network configuration.

    This method shutdowns the network side of the device.

    The method will wait up to a hardcoded timeout for the device to
    go into standalone after the 'disconnect' command before
    re-configuring it, as sometimes it takes a while for the
    disconnect to actually propagate and thus we might issue a 'net'
    command while the device is still connected. If the device will
    still be attached to the network and we time out, we raise an
    exception.

    """
    if self.minor is None:
      _ThrowError("drbd%d: disk not attached in re-attach net", self._aminor)

    if None in (self._lhost, self._lport, self._rhost, self._rport):
      _ThrowError("drbd%d: DRBD disk missing network info in"
                  " DisconnectNet()", self.minor)

    # mutable holder, so the retried closure below can record whether
    # at least one 'disconnect' invocation succeeded
    class _DisconnectStatus:
      def __init__(self, ever_disconnected):
        self.ever_disconnected = ever_disconnected

    dstatus = _DisconnectStatus(_IgnoreError(self._ShutdownNet, self.minor))

    def _WaitForDisconnect():
      # stop retrying once /proc reports the device as standalone
      if self.GetProcStatus().is_standalone:
        return

      # retry the disconnect, it seems possible that due to a well-time
      # disconnect on the peer, my disconnect command might be ignored and
      # forgotten
      dstatus.ever_disconnected = \
        _IgnoreError(self._ShutdownNet, self.minor) or dstatus.ever_disconnected

      raise utils.RetryAgain()

    # Keep start time
    start_time = time.time()

    try:
      # Start delay at 100 milliseconds and grow up to 2 seconds
      utils.Retry(_WaitForDisconnect, (0.1, 1.5, 2.0),
                  self._NET_RECONFIG_TIMEOUT)
    except utils.RetryTimeout:
      if dstatus.ever_disconnected:
        msg = ("drbd%d: device did not react to the"
               " 'disconnect' command in a timely manner")
      else:
        msg = "drbd%d: can't shutdown network, even after multiple retries"

      _ThrowError(msg, self.minor)

    # log slow detaches (more than a quarter of the allowed timeout)
    reconfig_time = time.time() - start_time
    if reconfig_time > (self._NET_RECONFIG_TIMEOUT * 0.25):
      logging.info("drbd%d: DisconnectNet: detach took %.3f seconds",
                   self.minor, reconfig_time)
  def AttachNet(self, multimaster):
    """Reconnects the network.

    This method connects the network side of the device with a
    specified multi-master flag. The device needs to be 'Standalone'
    but have valid network configuration data.

    Args:
      - multimaster: init the network in dual-primary mode

    """
    if self.minor is None:
      _ThrowError("drbd%d: device not attached in AttachNet", self._aminor)

    net_info = (self._lhost, self._lport, self._rhost, self._rport)
    if None in net_info:
      _ThrowError("drbd%d: missing network info in AttachNet()", self.minor)

    if not self.GetProcStatus().is_standalone:
      _ThrowError("drbd%d: device is not standalone in AttachNet", self.minor)

    self._AssembleNet(self.minor, net_info,
                      constants.DRBD_NET_PROTOCOL, dual_pri=multimaster,
                      hmac=constants.DRBD_HMAC_ALG, secret=self._secret)
  def Attach(self):
    """Check if our minor is configured.

    This doesn't do any device configurations - it only checks if the
    minor is in a state different from Unconfigured.

    Note that this function will not change the state of the system in
    any way (except in case of side-effects caused by reading from
    /proc).

    """
    minor = self._aminor if self._aminor in self.GetUsedDevs() else None

    self._SetFromMinor(minor)
    return minor is not None
  def Assemble(self):
    """Assemble the drbd.

    Method:
      - if we have a configured device, we try to ensure that it matches
        our config
      - if not, we create it from zero
      - anyway, set the device parameters

    """
    super(DRBD8, self).Assemble()

    self.Attach()
    if self.minor is not None:
      # we have to recheck the local and network status and try to fix
      # the device
      self._SlowAssemble()
    else:
      # local device completely unconfigured
      self._FastAssemble()

    sync_errors = self.SetSyncParams(self.params)
    if sync_errors:
      _ThrowError("drbd%d: can't set the synchronization parameters: %s" %
                  (self.minor, utils.CommaJoin(sync_errors)))
  def _SlowAssemble(self):
    """Assembles the DRBD device from a (partially) configured device.

    In case of partially attached (local device matches but no network
    setup), we perform the network attach. If successful, we re-test
    the attach if can return success.

    """
    # TODO: Rewrite to not use a for loop just because there is 'break'
    # pylint: disable=W0631
    net_data = (self._lhost, self._lport, self._rhost, self._rport)
    for minor in (self._aminor,):
      info = self._GetDevInfo(self._GetShowData(minor))
      match_l = self._MatchesLocal(info)
      match_r = self._MatchesNet(info)

      if match_l and match_r:
        # everything matches
        break

      if match_l and not match_r and "local_addr" not in info:
        # disk matches, but not attached to network, attach and recheck
        self._AssembleNet(minor, net_data, constants.DRBD_NET_PROTOCOL,
                          hmac=constants.DRBD_HMAC_ALG, secret=self._secret)
        if self._MatchesNet(self._GetDevInfo(self._GetShowData(minor))):
          break
        else:
          _ThrowError("drbd%d: network attach successful, but 'drbdsetup"
                      " show' disagrees", minor)

      if match_r and "local_dev" not in info:
        # no local disk, but network attached and it matches
        self._AssembleLocal(minor, self._children[0].dev_path,
                            self._children[1].dev_path, self.size)
        if self._MatchesNet(self._GetDevInfo(self._GetShowData(minor))):
          break
        else:
          _ThrowError("drbd%d: disk attach successful, but 'drbdsetup"
                      " show' disagrees", minor)

      # this case must be considered only if we actually have local
      # storage, i.e. not in diskless mode, because all diskless
      # devices are equal from the point of view of local
      # configuration
      if (match_l and "local_dev" in info and
          not match_r and "local_addr" in info):
        # strange case - the device network part points to somewhere
        # else, even though its local storage is ours; as we own the
        # drbd space, we try to disconnect from the remote peer and
        # reconnect to our correct one
        try:
          self._ShutdownNet(minor)
        except errors.BlockDeviceError, err:
          _ThrowError("drbd%d: device has correct local storage, wrong"
                      " remote peer and is unable to disconnect in order"
                      " to attach to the correct peer: %s", minor, str(err))
        # note: _AssembleNet also handles the case when we don't want
        # local storage (i.e. one or more of the _[lr](host|port) is
        # None)
        self._AssembleNet(minor, net_data, constants.DRBD_NET_PROTOCOL,
                          hmac=constants.DRBD_HMAC_ALG, secret=self._secret)
        if self._MatchesNet(self._GetDevInfo(self._GetShowData(minor))):
          break
        else:
          _ThrowError("drbd%d: network attach successful, but 'drbdsetup"
                      " show' disagrees", minor)

    else:
      # for/else: no 'break' was taken above, i.e. nothing matched and
      # nothing could be repaired, so the minor stays unconfigured
      minor = None

    self._SetFromMinor(minor)
    if minor is None:
      _ThrowError("drbd%d: cannot activate, unknown or unhandled reason",
                  self._aminor)
  def _FastAssemble(self):
    """Assemble the drbd device from zero.

    This is run when in Assemble we detect our minor is unused.

    """
    minor = self._aminor
    children = self._children
    # attach local storage only when both children are available
    if children and children[0] and children[1]:
      self._AssembleLocal(minor, children[0].dev_path,
                          children[1].dev_path, self.size)
    net_info = (self._lhost, self._lport, self._rhost, self._rport)
    # configure the network side only with complete endpoint data
    if all(net_info):
      self._AssembleNet(minor, net_info,
                        constants.DRBD_NET_PROTOCOL,
                        hmac=constants.DRBD_HMAC_ALG, secret=self._secret)
    self._SetFromMinor(minor)
  @classmethod
  def _ShutdownLocal(cls, minor):
    """Detach from the local device.

    I/Os will continue to be served from the remote device. If we
    don't have a remote device, this operation will fail.

    """
    result = utils.RunCmd(["drbdsetup", cls._DevPath(minor), "detach"])
    if not result.failed:
      return
    _ThrowError("drbd%d: can't detach local disk: %s", minor, result.output)
  @classmethod
  def _ShutdownNet(cls, minor):
    """Disconnect from the remote peer.

    This fails if we don't have a local device.

    """
    result = utils.RunCmd(["drbdsetup", cls._DevPath(minor), "disconnect"])
    if not result.failed:
      return
    _ThrowError("drbd%d: can't shutdown network: %s", minor, result.output)
  @classmethod
  def _ShutdownAll(cls, minor):
    """Deactivate the device.

    This will, of course, fail if the device is in use.

    """
    result = utils.RunCmd(["drbdsetup", cls._DevPath(minor), "down"])
    if not result.failed:
      return
    _ThrowError("drbd%d: can't shutdown drbd device: %s",
                minor, result.output)
  def Shutdown(self):
    """Shutdown the DRBD device.

    """
    if self.minor is None and not self.Attach():
      logging.info("drbd%d: not attached during Shutdown()", self._aminor)
      return
    # forget our own state before tearing the device down
    old_minor = self.minor
    self.minor = None
    self.dev_path = None
    self._ShutdownAll(old_minor)
  def Remove(self):
    """Stub remove for DRBD devices.

    DRBD devices are only assembled, never created, so removing one
    amounts to shutting it down.

    """
    self.Shutdown()
  @classmethod
  def Create(cls, unique_id, children, size, params, excl_stor):
    """Create a new DRBD8 device.

    Since DRBD devices are not created per se, just assembled, this
    function only initializes the metadata.

    """
    if len(children) != 2:
      raise errors.ProgrammerError("Invalid setup for the drbd device")
    if excl_stor:
      raise errors.ProgrammerError("DRBD device requested with"
                                   " exclusive_storage")
    # check that the minor is unused
    aminor = unique_id[4]
    proc_info = cls._MassageProcData(cls._GetProcData())
    in_use = (aminor in proc_info and
              DRBD8Status(proc_info[aminor]).is_in_use)
    if in_use:
      _ThrowError("drbd%d: minor is already in use at Create() time", aminor)
    # bring up the meta device and initialize it
    meta = children[1]
    meta.Assemble()
    if not meta.Attach():
      _ThrowError("drbd%d: can't attach to meta device '%s'",
                  aminor, meta)
    cls._CheckMetaSize(meta.dev_path)
    cls._InitMeta(aminor, meta.dev_path)
    return cls(unique_id, children, size, params)
  def Grow(self, amount, dryrun, backingstore):
    """Resize the DRBD device and its backing storage.

    @param amount: the amount (in MiB) to grow with
    @param dryrun: whether to only check the growth, not execute it
    @param backingstore: whether to grow only the backing storage

    """
    if self.minor is None:
      _ThrowError("drbd%d: Grow called while not attached", self._aminor)
    if len(self._children) != 2 or None in self._children:
      _ThrowError("drbd%d: cannot grow diskless device", self.minor)
    # grow the data child first
    self._children[0].Grow(amount, dryrun, backingstore)
    if dryrun or backingstore:
      # DRBD does not support dry-run mode and is not backing storage,
      # so we'll return here
      return
    new_size_mib = self.size + amount
    result = utils.RunCmd(["drbdsetup", self.dev_path, "resize", "-s",
                           "%dm" % new_size_mib])
    if result.failed:
      _ThrowError("drbd%d: resize failed: %s", self.minor, result.output)
class FileStorage(BlockDev):
2296
  """File device.
2297

2298
  This class represents the a file storage backend device.
2299

2300
  The unique_id for the file device is a (file_driver, file_path) tuple.
2301

2302
  """
2303
  def __init__(self, unique_id, children, size, params):
2304
    """Initalizes a file device backend.
2305

2306
    """
2307
    if children:
2308
      raise errors.BlockDeviceError("Invalid setup for file device")
2309
    super(FileStorage, self).__init__(unique_id, children, size, params)
2310
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
2311
      raise ValueError("Invalid configuration data %s" % str(unique_id))
2312
    self.driver = unique_id[0]
2313
    self.dev_path = unique_id[1]
2314

    
2315
    CheckFileStoragePath(self.dev_path)
2316

    
2317
    self.Attach()
2318

    
2319
  def Assemble(self):
2320
    """Assemble the device.
2321

2322
    Checks whether the file device exists, raises BlockDeviceError otherwise.
2323

2324
    """
2325
    if not os.path.exists(self.dev_path):
2326
      _ThrowError("File device '%s' does not exist" % self.dev_path)
2327

    
2328
  def Shutdown(self):
2329
    """Shutdown the device.
2330

2331
    This is a no-op for the file type, as we don't deactivate
2332
    the file on shutdown.
2333

2334
    """
2335
    pass
2336

    
2337
  def Open(self, force=False):
2338
    """Make the device ready for I/O.
2339

2340
    This is a no-op for the file type.
2341

2342
    """
2343
    pass
2344

    
2345
  def Close(self):
2346
    """Notifies that the device will no longer be used for I/O.
2347

2348
    This is a no-op for the file type.
2349

2350
    """
2351
    pass
2352

    
2353
  def Remove(self):
2354
    """Remove the file backing the block device.
2355

2356
    @rtype: boolean
2357
    @return: True if the removal was successful
2358

2359
    """
2360
    try:
2361
      os.remove(self.dev_path)
2362
    except OSError, err:
2363
      if err.errno != errno.ENOENT:
2364
        _ThrowError("Can't remove file '%s': %s", self.dev_path, err)
2365

    
2366
  def Rename(self, new_id):
2367
    """Renames the file.
2368

2369
    """
2370
    # TODO: implement rename for file-based storage
2371
    _ThrowError("Rename is not supported for file-based storage")
2372

    
2373
  def Grow(self, amount, dryrun, backingstore):
2374
    """Grow the file
2375

2376
    @param amount: the amount (in mebibytes) to grow with
2377

2378
    """
2379
    if not backingstore:
2380
      return
2381
    # Check that the file exists
2382
    self.Assemble()
2383
    current_size = self.GetActualSize()
2384
    new_size = current_size + amount * 1024 * 1024
2385
    assert new_size > current_size, "Cannot Grow with a negative amount"
2386
    # We can't really simulate the growth
2387
    if dryrun:
2388
      return
2389
    try:
2390
      f = open(self.dev_path, "a+")
2391
      f.truncate(new_size)
2392
      f.close()
2393
    except EnvironmentError, err:
2394
      _ThrowError("Error in file growth: %", str(err))
2395

    
2396
  def Attach(self):
2397
    """Attach to an existing file.
2398

2399
    Check if this file already exists.
2400

2401
    @rtype: boolean
2402
    @return: True if file exists
2403

2404
    """
2405
    self.attached = os.path.exists(self.dev_path)
2406
    return self.attached
2407

    
2408
  def GetActualSize(self):
2409
    """Return the actual disk size.
2410

2411
    @note: the device needs to be active when this is called
2412

2413
    """
2414
    assert self.attached, "BlockDevice not attached in GetActualSize()"
2415
    try:
2416
      st = os.stat(self.dev_path)
2417
      return st.st_size
2418
    except OSError, err:
2419
      _ThrowError("Can't stat %s: %s", self.dev_path, err)
2420

    
2421
  @classmethod
2422
  def Create(cls, unique_id, children, size, params, excl_stor):
2423
    """Create a new file.
2424

2425
    @param size: the size of file in MiB
2426

2427
    @rtype: L{bdev.FileStorage}
2428
    @return: an instance of FileStorage
2429

2430
    """
2431
    if excl_stor:
2432
      raise errors.ProgrammerError("FileStorage device requested with"
2433
                                   " exclusive_storage")
2434
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
2435
      raise ValueError("Invalid configuration data %s" % str(unique_id))
2436

    
2437
    dev_path = unique_id[1]
2438

    
2439
    CheckFileStoragePath(dev_path)
2440

    
2441
    try:
2442
      fd = os.open(dev_path, os.O_RDWR | os.O_CREAT | os.O_EXCL)
2443
      f = os.fdopen(fd, "w")
2444
      f.truncate(size * 1024 * 1024)
2445
      f.close()
2446
    except EnvironmentError, err:
2447
      if err.errno == errno.EEXIST:
2448
        _ThrowError("File already existing: %s", dev_path)
2449
      _ThrowError("Error in file creation: %", str(err))
2450

    
2451
    return FileStorage(unique_id, children, size, params)
2452

    
2453

    
2454
class PersistentBlockDevice(BlockDev):
2455
  """A block device with persistent node
2456

2457
  May be either directly attached, or exposed through DM (e.g. dm-multipath).
2458
  udev helpers are probably required to give persistent, human-friendly
2459
  names.
2460

2461
  For the time being, pathnames are required to lie under /dev.
2462

2463
  """
2464
  def __init__(self, unique_id, children, size, params):
2465
    """Attaches to a static block device.
2466

2467
    The unique_id is a path under /dev.
2468

2469
    """
2470
    super(PersistentBlockDevice, self).__init__(unique_id, children, size,
2471
                                                params)
2472
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
2473
      raise ValueError("Invalid configuration data %s" % str(unique_id))
2474
    self.dev_path = unique_id[1]
2475
    if not os.path.realpath(self.dev_path).startswith("/dev/"):
2476
      raise ValueError("Full path '%s' lies outside /dev" %
2477
                              os.path.realpath(self.dev_path))
2478
    # TODO: this is just a safety guard checking that we only deal with devices
2479
    # we know how to handle. In the future this will be integrated with
2480
    # external storage backends and possible values will probably be collected
2481
    # from the cluster configuration.
2482
    if unique_id[0] != constants.BLOCKDEV_DRIVER_MANUAL:
2483
      raise ValueError("Got persistent block device of invalid type: %s" %
2484
                       unique_id[0])
2485

    
2486
    self.major = self.minor = None
2487
    self.Attach()
2488

    
2489
  @classmethod
2490
  def Create(cls, unique_id, children, size, params, excl_stor):
2491
    """Create a new device
2492

2493
    This is a noop, we only return a PersistentBlockDevice instance
2494

2495
    """
2496
    if excl_stor:
2497
      raise errors.ProgrammerError("Persistent block device requested with"
2498
                                   " exclusive_storage")
2499
    return PersistentBlockDevice(unique_id, children, 0, params)
2500

    
2501
  def Remove(self):
2502
    """Remove a device
2503

2504
    This is a noop
2505

2506
    """
2507
    pass
2508

    
2509
  def Rename(self, new_id):
2510
    """Rename this device.
2511

2512
    """
2513
    _ThrowError("Rename is not supported for PersistentBlockDev storage")
2514

    
2515
  def Attach(self):
2516
    """Attach to an existing block device.
2517

2518

2519
    """
2520
    self.attached = False
2521
    try:
2522
      st = os.stat(self.dev_path)
2523
    except OSError, err:
2524
      logging.error("Error stat()'ing %s: %s", self.dev_path, str(err))
2525
      return False
2526

    
2527
    if not stat.S_ISBLK(st.st_mode):
2528
      logging.error("%s is not a block device", self.dev_path)
2529
      return False
2530

    
2531
    self.major = os.major(st.st_rdev)
2532
    self.minor = os.minor(st.st_rdev)
2533
    self.attached = True
2534

    
2535
    return True
2536

    
2537
  def Assemble(self):
2538
    """Assemble the device.
2539

2540
    """
2541
    pass
2542

    
2543
  def Shutdown(self):
2544
    """Shutdown the device.
2545

2546
    """
2547
    pass
2548

    
2549
  def Open(self, force=False):
2550
    """Make the device ready for I/O.
2551

2552
    """
2553
    pass
2554

    
2555
  def Close(self):
2556
    """Notifies that the device will no longer be used for I/O.
2557

2558
    """
2559
    pass
2560

    
2561
  def Grow(self, amount, dryrun, backingstore):
2562
    """Grow the logical volume.
2563

2564
    """
2565
    _ThrowError("Grow is not supported for PersistentBlockDev storage")
2566

    
2567

    
2568
class RADOSBlockDevice(BlockDev):
  """A RADOS Block Device (rbd).

  This class implements the RADOS Block Device for the backend. You need
  the rbd kernel driver, the RADOS Tools and a working RADOS cluster for
  this to be functional.

  All operations shell out to the C{rbd} command-line tool; no librbd
  bindings are used.

  """
  def __init__(self, unique_id, children, size, params):
    """Attaches to an rbd device.

    @param unique_id: a (driver, rbd_name) pair identifying the volume

    """
    super(RADOSBlockDevice, self).__init__(unique_id, children, size, params)
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
      raise ValueError("Invalid configuration data %s" % str(unique_id))

    self.driver, self.rbd_name = unique_id

    # major/minor are filled in by Attach() when the mapping exists
    self.major = self.minor = None
    self.Attach()

  @classmethod
  def Create(cls, unique_id, children, size, params, excl_stor):
    """Create a new rbd device.

    Provision a new rbd volume inside a RADOS pool.

    """
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
      raise errors.ProgrammerError("Invalid configuration data %s" %
                                   str(unique_id))
    if excl_stor:
      raise errors.ProgrammerError("RBD device requested with"
                                   " exclusive_storage")
    rbd_pool = params[constants.LDP_POOL]
    rbd_name = unique_id[1]

    # Provision a new rbd volume (Image) inside the RADOS cluster.
    # Size is passed through as-is; rbd interprets it in mebibytes.
    cmd = [constants.RBD_CMD, "create", "-p", rbd_pool,
           rbd_name, "--size", "%s" % size]
    result = utils.RunCmd(cmd)
    if result.failed:
      _ThrowError("rbd creation failed (%s): %s",
                  result.fail_reason, result.output)

    return RADOSBlockDevice(unique_id, children, size, params)

  def Remove(self):
    """Remove the rbd device.

    Unmaps the device first (via L{Shutdown}), then deletes the image
    from the RADOS cluster.

    """
    rbd_pool = self.params[constants.LDP_POOL]
    rbd_name = self.unique_id[1]

    # NOTE(review): minor 0 (e.g. /dev/rbd0) is falsy, so an already
    # attached device with minor 0 re-runs Attach() here; harmless but
    # worth confirming.
    if not self.minor and not self.Attach():
      # The rbd device doesn't exist.
      return

    # First shutdown the device (remove mappings).
    self.Shutdown()

    # Remove the actual Volume (Image) from the RADOS cluster.
    cmd = [constants.RBD_CMD, "rm", "-p", rbd_pool, rbd_name]
    result = utils.RunCmd(cmd)
    if result.failed:
      _ThrowError("Can't remove Volume from cluster with rbd rm: %s - %s",
                  result.fail_reason, result.output)

  def Rename(self, new_id):
    """Rename this device.

    Not implemented for rbd devices; silently does nothing.

    """
    pass

  def Attach(self):
    """Attach to an existing rbd device.

    This method maps the rbd volume that matches our name with
    an rbd device and then attaches to this device.

    """
    self.attached = False

    # Map the rbd volume to a block device under /dev
    self.dev_path = self._MapVolumeToBlockdev(self.unique_id)

    try:
      st = os.stat(self.dev_path)
    except OSError, err:
      logging.error("Error stat()'ing %s: %s", self.dev_path, str(err))
      return False

    if not stat.S_ISBLK(st.st_mode):
      logging.error("%s is not a block device", self.dev_path)
      return False

    self.major = os.major(st.st_rdev)
    self.minor = os.minor(st.st_rdev)
    self.attached = True

    return True

  def _MapVolumeToBlockdev(self, unique_id):
    """Maps existing rbd volumes to block devices.

    This method should be idempotent if the mapping already exists.

    @rtype: string
    @return: the block device path that corresponds to the volume

    """
    pool = self.params[constants.LDP_POOL]
    name = unique_id[1]

    # Check if the mapping already exists.
    showmap_cmd = [constants.RBD_CMD, "showmapped", "-p", pool]
    result = utils.RunCmd(showmap_cmd)
    if result.failed:
      _ThrowError("rbd showmapped failed (%s): %s",
                  result.fail_reason, result.output)

    rbd_dev = self._ParseRbdShowmappedOutput(result.output, name)

    if rbd_dev:
      # The mapping exists. Return it.
      return rbd_dev

    # The mapping doesn't exist. Create it.
    map_cmd = [constants.RBD_CMD, "map", "-p", pool, name]
    result = utils.RunCmd(map_cmd)
    if result.failed:
      _ThrowError("rbd map failed (%s): %s",
                  result.fail_reason, result.output)

    # Find the corresponding rbd device: `rbd map` does not report the
    # device path, so query showmapped again.
    showmap_cmd = [constants.RBD_CMD, "showmapped", "-p", pool]
    result = utils.RunCmd(showmap_cmd)
    if result.failed:
      _ThrowError("rbd map succeeded, but showmapped failed (%s): %s",
                  result.fail_reason, result.output)

    rbd_dev = self._ParseRbdShowmappedOutput(result.output, name)

    if not rbd_dev:
      _ThrowError("rbd map succeeded, but could not find the rbd block"
                  " device in output of showmapped, for volume: %s", name)

    # The device was successfully mapped. Return it.
    return rbd_dev

  @staticmethod
  def _ParseRbdShowmappedOutput(output, volume_name):
    """Parse the output of `rbd showmapped'.

    This method parses the output of `rbd showmapped' and returns
    the rbd block device path (e.g. /dev/rbd0) that matches the
    given rbd volume.

    @type output: string
    @param output: the whole output of `rbd showmapped'
    @type volume_name: string
    @param volume_name: the name of the volume whose device we search for
    @rtype: string or None
    @return: block device path if the volume is mapped, else None

    """
    # NOTE(review): assumes `rbd showmapped` prints exactly 5
    # tab-separated columns (volume name in col 2, device in col 4);
    # confirm against the deployed Ceph version, as the format has
    # changed across releases.
    allfields = 5
    volumefield = 2
    devicefield = 4

    field_sep = "\t"

    lines = output.splitlines()
    # NOTE: map()/filter() return lists under Python 2; the len() and
    # indexing below rely on that.
    splitted_lines = map(lambda l: l.split(field_sep), lines)

    # Check empty output.
    if not splitted_lines:
      _ThrowError("rbd showmapped returned empty output")

    # Check showmapped header line, to determine number of fields.
    field_cnt = len(splitted_lines[0])
    if field_cnt != allfields:
      _ThrowError("Cannot parse rbd showmapped output because its format"
                  " seems to have changed; expected %s fields, found %s",
                  allfields, field_cnt)

    matched_lines = \
      filter(lambda l: len(l) == allfields and l[volumefield] == volume_name,
             splitted_lines)

    if len(matched_lines) > 1:
      _ThrowError("The rbd volume %s is mapped more than once."
                  " This shouldn't happen, try to unmap the extra"
                  " devices manually.", volume_name)

    if matched_lines:
      # rbd block device found. Return it.
      rbd_dev = matched_lines[0][devicefield]
      return rbd_dev

    # The given volume is not mapped.
    return None

  def Assemble(self):
    """Assemble the device.

    Nothing to do: mapping happens lazily in L{Attach}.

    """
    pass

  def Shutdown(self):
    """Shutdown the device.

    Unmaps the rbd volume from its block device.

    """
    if not self.minor and not self.Attach():
      # The rbd device doesn't exist.
      return

    # Unmap the block device from the Volume.
    self._UnmapVolumeFromBlockdev(self.unique_id)

    self.minor = None
    self.dev_path = None

  def _UnmapVolumeFromBlockdev(self, unique_id):
    """Unmaps the rbd device from the Volume it is mapped.

    Unmaps the rbd device from the Volume it was previously mapped to.
    This method should be idempotent if the Volume isn't mapped.

    """
    pool = self.params[constants.LDP_POOL]
    name = unique_id[1]

    # Check if the mapping already exists.
    showmap_cmd = [constants.RBD_CMD, "showmapped", "-p", pool]
    result = utils.RunCmd(showmap_cmd)
    if result.failed:
      _ThrowError("rbd showmapped failed [during unmap](%s): %s",
                  result.fail_reason, result.output)

    rbd_dev = self._ParseRbdShowmappedOutput(result.output, name)

    if rbd_dev:
      # The mapping exists. Unmap the rbd device.
      unmap_cmd = [constants.RBD_CMD, "unmap", "%s" % rbd_dev]
      result = utils.RunCmd(unmap_cmd)
      if result.failed:
        _ThrowError("rbd unmap failed (%s): %s",
                    result.fail_reason, result.output)

  def Open(self, force=False):
    """Make the device ready for I/O.

    No preparation is needed once the volume is mapped.

    """
    pass

  def Close(self):
    """Notifies that the device will no longer be used for I/O.

    No teardown is performed here; unmapping happens in L{Shutdown}.

    """
    pass

  def Grow(self, amount, dryrun, backingstore):
    """Grow the Volume.

    @type amount: integer
    @param amount: the amount (in mebibytes) to grow with
    @type dryrun: boolean
    @param dryrun: whether to execute the operation in simulation mode
        only, without actually increasing the size

    """
    if not backingstore:
      return
    if not self.Attach():
      _ThrowError("Can't attach to rbd device during Grow()")

    if dryrun:
      # the rbd tool does not support dry runs of resize operations.
      # Since rbd volumes are thinly provisioned, we assume
      # there is always enough free space for the operation.
      return

    rbd_pool = self.params[constants.LDP_POOL]
    rbd_name = self.unique_id[1]
    new_size = self.size + amount

    # Resize the rbd volume (Image) inside the RADOS cluster.
    cmd = [constants.RBD_CMD, "resize", "-p", rbd_pool,
           rbd_name, "--size", "%s" % new_size]
    result = utils.RunCmd(cmd)
    if result.failed:
      _ThrowError("rbd resize failed (%s): %s",
                  result.fail_reason, result.output)
2864
class ExtStorageDevice(BlockDev):
  """A block device provided by an ExtStorage Provider.

  This class implements the External Storage Interface, which means
  handling of the externally provided block devices.

  All operations delegate to the provider's scripts via
  L{_ExtStorageAction}.

  """
  def __init__(self, unique_id, children, size, params):
    """Attaches to an extstorage block device.

    @param unique_id: a (driver, vol_name) pair identifying the volume

    """
    super(ExtStorageDevice, self).__init__(unique_id, children, size, params)
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
      raise ValueError("Invalid configuration data %s" % str(unique_id))

    self.driver, self.vol_name = unique_id
    # Provider-specific parameters, passed to every script invocation
    self.ext_params = params

    # major/minor are filled in by Attach() when a mapping exists
    self.major = self.minor = None
    self.Attach()

  @classmethod
  def Create(cls, unique_id, children, size, params, excl_stor):
    """Create a new extstorage device.

    Provision a new volume using an extstorage provider, which will
    then be mapped to a block device.

    """
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
      raise errors.ProgrammerError("Invalid configuration data %s" %
                                   str(unique_id))
    if excl_stor:
      raise errors.ProgrammerError("extstorage device requested with"
                                   " exclusive_storage")

    # Call the External Storage's create script,
    # to provision a new Volume inside the External Storage
    _ExtStorageAction(constants.ES_ACTION_CREATE, unique_id,
                      params, str(size))

    return ExtStorageDevice(unique_id, children, size, params)

  def Remove(self):
    """Remove the extstorage device.

    Detaches first (via L{Shutdown}), then asks the provider to delete
    the volume.

    """
    # NOTE(review): minor 0 is falsy, so an attached device with minor 0
    # re-runs Attach() here; harmless but worth confirming.
    if not self.minor and not self.Attach():
      # The extstorage device doesn't exist.
      return

    # First shutdown the device (remove mappings).
    self.Shutdown()

    # Call the External Storage's remove script,
    # to remove the Volume from the External Storage
    _ExtStorageAction(constants.ES_ACTION_REMOVE, self.unique_id,
                      self.ext_params)

  def Rename(self, new_id):
    """Rename this device.

    Not implemented for extstorage devices; silently does nothing.

    """
    pass

  def Attach(self):
    """Attach to an existing extstorage device.

    This method maps the extstorage volume that matches our name with
    a corresponding block device and then attaches to this device.

    """
    self.attached = False

    # Call the External Storage's attach script,
    # to attach an existing Volume to a block device under /dev
    self.dev_path = _ExtStorageAction(constants.ES_ACTION_ATTACH,
                                      self.unique_id, self.ext_params)

    try:
      st = os.stat(self.dev_path)
    except OSError, err:
      logging.error("Error stat()'ing %s: %s", self.dev_path, str(err))
      return False

    if not stat.S_ISBLK(st.st_mode):
      logging.error("%s is not a block device", self.dev_path)
      return False

    self.major = os.major(st.st_rdev)
    self.minor = os.minor(st.st_rdev)
    self.attached = True

    return True

  def Assemble(self):
    """Assemble the device.

    Nothing to do: mapping happens lazily in L{Attach}.

    """
    pass

  def Shutdown(self):
    """Shutdown the device.

    Detaches the volume from its block device via the provider's
    detach script.

    """
    if not self.minor and not self.Attach():
      # The extstorage device doesn't exist.
      return

    # Call the External Storage's detach script,
    # to detach an existing Volume from it's block device under /dev
    _ExtStorageAction(constants.ES_ACTION_DETACH, self.unique_id,
                      self.ext_params)

    self.minor = None
    self.dev_path = None

  def Open(self, force=False):
    """Make the device ready for I/O.

    No preparation is needed once the volume is attached.

    """
    pass

  def Close(self):
    """Notifies that the device will no longer be used for I/O.

    No teardown is performed here; detaching happens in L{Shutdown}.

    """
    pass

  def Grow(self, amount, dryrun, backingstore):
    """Grow the Volume.

    @type amount: integer
    @param amount: the amount (in mebibytes) to grow with
    @type dryrun: boolean
    @param dryrun: whether to execute the operation in simulation mode
        only, without actually increasing the size

    """
    if not backingstore:
      return
    if not self.Attach():
      _ThrowError("Can't attach to extstorage device during Grow()")

    if dryrun:
      # we do not support dry runs of resize operations for now.
      return

    new_size = self.size + amount

    # Call the External Storage's grow script,
    # to grow an existing Volume inside the External Storage
    _ExtStorageAction(constants.ES_ACTION_GROW, self.unique_id,
                      self.ext_params, str(self.size), grow=str(new_size))

  def SetInfo(self, text):
    """Update metadata with info text.

    """
    # Replace invalid characters: first a non-alphanumeric leading
    # character, then any remaining disallowed characters (the second
    # pattern additionally allows "-", but not as the first character)
    text = re.sub("^[^A-Za-z0-9_+.]", "_", text)
    text = re.sub("[^-A-Za-z0-9_+.]", "_", text)

    # Only up to 128 characters are allowed
    text = text[:128]

    # Call the External Storage's setinfo script,
    # to set metadata for an existing Volume inside the External Storage
    _ExtStorageAction(constants.ES_ACTION_SETINFO, self.unique_id,
                      self.ext_params, metadata=text)
3036
def _ExtStorageAction(action, unique_id, ext_params,
                      size=None, grow=None, metadata=None):
  """Take an External Storage action.

  Take an External Storage action concerning or affecting
  a specific Volume inside the External Storage.

  @type action: string
  @param action: which action to perform. One of:
                 create / remove / grow / attach / detach
  @type unique_id: tuple (driver, vol_name)
  @param unique_id: a tuple containing the type of ExtStorage (driver)
                    and the Volume name
  @type ext_params: dict
  @param ext_params: ExtStorage parameters
  @type size: integer
  @param size: the size of the Volume in mebibytes
  @type grow: integer
  @param grow: the new size in mebibytes (after grow)
  @type metadata: string
  @param metadata: metadata info of the Volume, for use by the provider
  @rtype: None or a block device path (during attach)

  """
  driver, vol_name = unique_id

  # Create an External Storage instance of type `driver'
  status, inst_es = ExtStorageFromDisk(driver)
  if not status:
    _ThrowError("%s" % inst_es)

  # Create the basic environment for the driver's scripts
  create_env = _ExtStorageEnvironment(unique_id, ext_params, size,
                                      grow, metadata)

  # Do not use log file for action `attach' as we need
  # to get the output from RunResult
  # TODO: find a way to have a log file for attach too
  logfile = None
  # Compare with "!=", not "is not": these constants are plain strings
  # and identity comparison only works by accident of interning.
  if action != constants.ES_ACTION_ATTACH:
    logfile = _VolumeLogName(action, driver, vol_name)

  # Make sure the given action results in a valid script
  if action not in constants.ES_SCRIPTS:
    _ThrowError("Action '%s' doesn't result in a valid ExtStorage script" %
                action)

  # Find out which external script to run according the given action
  script_name = action + "_script"
  script = getattr(inst_es, script_name)

  # Run the external script
  result = utils.RunCmd([script], env=create_env,
                        cwd=inst_es.path, output=logfile)
  if result.failed:
    logging.error("External storage's %s command '%s' returned"
                  " error: %s, logfile: %s, output: %s",
                  action, result.cmd, result.fail_reason,
                  logfile, result.output)

    # If logfile is 'None' (during attach), it breaks TailFile
    # TODO: have a log file for attach too
    if action != constants.ES_ACTION_ATTACH:
      lines = [utils.SafeEncode(val)
               for val in utils.TailFile(logfile, lines=20)]
    else:
      # result.output is a single string; take its last 20 *lines*.
      # (Slicing the string directly would yield the last 20 characters
      # and the "\n".join() below would then separate single characters.)
      lines = result.output.splitlines()[-20:]

    _ThrowError("External storage's %s script failed (%s), last"
                " lines of output:\n%s",
                action, result.fail_reason, "\n".join(lines))

  if action == constants.ES_ACTION_ATTACH:
    return result.stdout
3112
def ExtStorageFromDisk(name, base_dir=None):
3113
  """Create an ExtStorage instance from disk.
3114

3115
  This function will return an ExtStorage instance
3116
  if the given name is a valid ExtStorage name.
3117

3118
  @type base_dir: string
3119
  @keyword base_dir: Base directory containing ExtStorage installations.
3120
                     Defaults to a search in all the ES_SEARCH_PATH dirs.
3121
  @rtype: tuple
3122
  @return: True and the ExtStorage instance if we find a valid one, or
3123
      False and the diagnose message on error
3124

3125
  """
3126
  if base_dir is None:
3127
    es_base_dir = pathutils.ES_SEARCH_PATH
3128
  else:
3129
    es_base_dir = [base_dir]
3130

    
3131
  es_dir = utils.FindFile(name, es_base_dir, os.path.isdir)
3132

    
3133
  if es_dir is None:
3134
    return False, ("Directory for External Storage Provider %s not"
3135
                   " found in search path" % name)
3136

    
3137
  # ES Files dictionary, we will populate it with the absolute path
3138
  # names; if the value is True, then it is a required file, otherwise
3139
  # an optional one
3140
  es_files = dict.fromkeys(constants.ES_SCRIPTS, True)
3141

    
3142
  es_files[constants.ES_PARAMETERS_FILE] = True
3143

    
3144
  for (filename, _) in es_files.items():
3145
    es_files[filename] = utils.PathJoin(es_dir, filename)
3146

    
3147
    try:
3148
      st = os.stat(es_files[filename])
3149
    except EnvironmentError, err:
3150
      return False, ("File '%s' under path '%s' is missing (%s)" %
3151
                     (filename, es_dir, utils.ErrnoOrStr(err)))
3152

    
3153
    if not stat.S_ISREG(stat.S_IFMT(st.st_mode)):
3154
      return False, ("File '%s' under path '%s' is not a regular file" %
3155
                     (filename, es_dir))
3156

    
3157
    if filename in constants.ES_SCRIPTS:
3158
      if stat.S_IMODE(st.st_mode) & stat.S_IXUSR != stat.S_IXUSR:
3159
        return False, ("File '%s' under path '%s' is not executable" %
3160
                       (filename, es_dir))
3161

    
3162
  parameters = []
3163
  if constants.ES_PARAMETERS_FILE in es_files:
3164
    parameters_file = es_files[constants.ES_PARAMETERS_FILE]
3165
    try:
3166
      parameters = utils.ReadFile(parameters_file).splitlines()
3167
    except EnvironmentError, err:
3168
      return False, ("Error while reading the EXT parameters file at %s: %s" %
3169
                     (parameters_file, utils.ErrnoOrStr(err)))
3170
    parameters = [v.split(None, 1) for v in parameters]
3171

    
3172
  es_obj = \
3173
    objects.ExtStorage(name=name, path=es_dir,
3174
                       create_script=es_files[constants.ES_SCRIPT_CREATE],
3175
                       remove_script=es_files[constants.ES_SCRIPT_REMOVE],
3176
                       grow_script=es_files[constants.ES_SCRIPT_GROW],
3177
                       attach_script=es_files[constants.ES_SCRIPT_ATTACH],
3178
                       detach_script=es_files[constants.ES_SCRIPT_DETACH],
3179
                       setinfo_script=es_files[constants.ES_SCRIPT_SETINFO],
3180
                       verify_script=es_files[constants.ES_SCRIPT_VERIFY],
3181
                       supported_parameters=parameters)
3182
  return True, es_obj
3183

    
3184

    
3185
def _ExtStorageEnvironment(unique_id, ext_params,
3186
                           size=None, grow=None, metadata=None):
3187
  """Calculate the environment for an External Storage script.
3188

3189
  @type unique_id: tuple (driver, vol_name)
3190
  @param unique_id: ExtStorage pool and name of the Volume
3191
  @type ext_params: dict
3192
  @param ext_params: the EXT parameters
3193
  @type size: string
3194
  @param size: size of the Volume (in mebibytes)
3195
  @type grow: string
3196
  @param grow: new size of Volume after grow (in mebibytes)
3197
  @type metadata: string
3198
  @param metadata: metadata info of the Volume
3199
  @rtype: dict
3200
  @return: dict of environment variables
3201

3202
  """
3203
  vol_name = unique_id[1]
3204

    
3205
  result = {}
3206
  result["VOL_NAME"] = vol_name
3207

    
3208
  # EXT params
3209
  for pname, pvalue in ext_params.items():
3210
    result["EXTP_%s" % pname.upper()] = str(pvalue)
3211

    
3212
  if size is not None:
3213
    result["VOL_SIZE"] = size
3214

    
3215
  if grow is not None:
3216
    result["VOL_NEW_SIZE"] = grow
3217

    
3218
  if metadata is not None:
3219
    result["VOL_METADATA"] = metadata
3220

    
3221
  return result
3222

    
3223

    
3224
def _VolumeLogName(kind, es_name, volume):
  """Compute the ExtStorage log filename for a given Volume and operation.

  @type kind: string
  @param kind: the operation type (e.g. create, remove etc.)
  @type es_name: string
  @param es_name: the ExtStorage name
  @type volume: string
  @param volume: the name of the Volume inside the External Storage

  """
  # Refuse to continue when the extstorage log directory is missing
  if not os.path.isdir(pathutils.LOG_ES_DIR):
    _ThrowError("Cannot find log directory: %s", pathutils.LOG_ES_DIR)

  # TODO: Use tempfile.mkstemp to create unique filename
  basename = ("%s-%s-%s-%s.log" %
              (kind, es_name, volume, utils.TimestampForFilename()))
  return utils.PathJoin(pathutils.LOG_ES_DIR, basename)
3245
# Mapping of logical-disk type constants to the classes implementing
# them; used by the factory functions below (FindDevice, Assemble,
# Create).
DEV_MAP = {
  constants.LD_LV: LogicalVolume,
  constants.LD_DRBD8: DRBD8,
  constants.LD_BLOCKDEV: PersistentBlockDevice,
  constants.LD_RBD: RADOSBlockDevice,
  constants.LD_EXT: ExtStorageDevice,
  }

# File-based storage is only registered when it was enabled in the
# build configuration.
if constants.ENABLE_FILE_STORAGE or constants.ENABLE_SHARED_FILE_STORAGE:
  DEV_MAP[constants.LD_FILE] = FileStorage
3257
def _VerifyDiskType(dev_type):
  """Check that C{dev_type} names a known block device class.

  @raise errors.ProgrammerError: if the type is not in L{DEV_MAP}

  """
  if dev_type in DEV_MAP:
    return
  raise errors.ProgrammerError("Invalid block device type '%s'" % dev_type)
3262
def _VerifyDiskParams(disk):
  """Verifies if all disk parameters are set.

  @raise errors.ProgrammerError: if any expected parameter is missing

  """
  expected = set(constants.DISK_LD_DEFAULTS[disk.dev_type])
  missing = expected - set(disk.params)
  if missing:
    raise errors.ProgrammerError("Block device is missing disk parameters: %s" %
                                 missing)
    
3272
def FindDevice(disk, children):
  """Search for an existing, assembled device.

  This will succeed only if the device exists and is assembled, but it
  does not do any actions in order to activate the device.

  @type disk: L{objects.Disk}
  @param disk: the disk object to find
  @type children: list of L{bdev.BlockDev}
  @param children: the list of block devices that are children of the device
                  represented by the disk parameter

  """
  _VerifyDiskType(disk.dev_type)
  dev_class = DEV_MAP[disk.dev_type]
  device = dev_class(disk.physical_id, children, disk.size, disk.params)
  if device.attached:
    return device
  return None
3293
def Assemble(disk, children):
  """Try to attach or assemble an existing device.

  This will attach to assemble the device, as needed, to bring it
  fully up. It must be safe to run on already-assembled devices.

  @type disk: L{objects.Disk}
  @param disk: the disk object to assemble
  @type children: list of L{bdev.BlockDev}
  @param children: the list of block devices that are children of the device
                  represented by the disk parameter

  """
  _VerifyDiskType(disk.dev_type)
  _VerifyDiskParams(disk)
  dev_class = DEV_MAP[disk.dev_type]
  device = dev_class(disk.physical_id, children, disk.size, disk.params)
  device.Assemble()
  return device
3314
def Create(disk, children, excl_stor):
  """Create a device.

  @type disk: L{objects.Disk}
  @param disk: the disk object to create
  @type children: list of L{bdev.BlockDev}
  @param children: the list of block devices that are children of the device
                  represented by the disk parameter
  @type excl_stor: boolean
  @param excl_stor: Whether exclusive_storage is active

  """
  _VerifyDiskType(disk.dev_type)
  _VerifyDiskParams(disk)
  dev_class = DEV_MAP[disk.dev_type]
  return dev_class.Create(disk.physical_id, children, disk.size,
                          disk.params, excl_stor)