Statistics
| Branch: | Tag: | Revision:

root / lib / bdev.py @ 1a3c5d4e

History | View | Annotate | Download (105.6 kB)

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2010, 2011, 2012 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Block device abstraction"""
23

    
24
import re
25
import time
26
import errno
27
import shlex
28
import stat
29
import pyparsing as pyp
30
import os
31
import logging
32
import math
33

    
34
from ganeti import utils
35
from ganeti import errors
36
from ganeti import constants
37
from ganeti import objects
38
from ganeti import compat
39
from ganeti import netutils
40
from ganeti import pathutils
41

    
42

    
43
# Size of reads in _CanReadDevice
44
_DEVICE_READ_SIZE = 128 * 1024
45

    
46

    
47
def _IgnoreError(fn, *args, **kwargs):
48
  """Executes the given function, ignoring BlockDeviceErrors.
49

50
  This is used in order to simplify the execution of cleanup or
51
  rollback functions.
52

53
  @rtype: boolean
54
  @return: True when fn didn't raise an exception, False otherwise
55

56
  """
57
  try:
58
    fn(*args, **kwargs)
59
    return True
60
  except errors.BlockDeviceError, err:
61
    logging.warning("Caught BlockDeviceError but ignoring: %s", str(err))
62
    return False
63

    
64

    
65
def _ThrowError(msg, *args):
  """Log an error to the node daemon and then raise an exception.

  @type msg: string
  @param msg: the text of the exception
  @raise errors.BlockDeviceError

  """
  # interpolate only when arguments were given, so callers may pass a
  # pre-formatted message containing literal '%' characters
  text = msg % args if args else msg
  logging.error(text)
  raise errors.BlockDeviceError(text)
77

    
78

    
79
def _CheckResult(result):
80
  """Throws an error if the given result is a failed one.
81

82
  @param result: result from RunCmd
83

84
  """
85
  if result.failed:
86
    _ThrowError("Command: %s error: %s - %s", result.cmd, result.fail_reason,
87
                result.output)
88

    
89

    
90
def _CanReadDevice(path):
  """Check if we can read from the given device.

  This tries to read the first 128k of the device.

  @rtype: boolean

  """
  try:
    utils.ReadFile(path, size=_DEVICE_READ_SIZE)
  except EnvironmentError:
    logging.warning("Can't read from device %s", path, exc_info=True)
    return False
  return True
102

    
103

    
104
def _GetForbiddenFileStoragePaths():
  """Builds a list of path prefixes which shouldn't be used for file storage.

  @rtype: frozenset

  """
  paths = set([
    "/boot",
    "/dev",
    "/etc",
    "/home",
    "/proc",
    "/root",
    "/sys",
    ])

  # binary/library directories under /, /usr and /usr/local
  for prefix in ["", "/usr", "/usr/local"]:
    for subdir in ["bin", "lib", "lib32", "lib64", "sbin"]:
      paths.add("%s/%s" % (prefix, subdir))

  return compat.UniqueFrozenset(map(os.path.normpath, paths))
125

    
126

    
127
def _ComputeWrongFileStoragePaths(paths,
                                  _forbidden=_GetForbiddenFileStoragePaths()):
  """Cross-checks a list of paths for prefixes considered bad.

  Some paths, e.g. "/bin", should not be used for file storage.

  @type paths: list
  @param paths: List of paths to be checked
  @rtype: list
  @return: Sorted list of paths for which the user should be warned

  """
  def _IsWrong(path):
    # relative paths and forbidden prefixes (or anything below them)
    # are rejected
    if not os.path.isabs(path):
      return True
    if path in _forbidden:
      return True
    return compat.any(utils.IsBelowDir(p, path) for p in _forbidden)

  normalized = map(os.path.normpath, paths)
  return utils.NiceSort([p for p in normalized if _IsWrong(p)])
145

    
146

    
147
def ComputeWrongFileStoragePaths(_filename=pathutils.FILE_STORAGE_PATHS_FILE):
  """Returns a list of file storage paths whose prefix is considered bad.

  See L{_ComputeWrongFileStoragePaths}.

  """
  cfg_paths = _LoadAllowedFileStoragePaths(_filename)
  return _ComputeWrongFileStoragePaths(cfg_paths)
154

    
155

    
156
def _CheckFileStoragePath(path, allowed):
  """Checks if a path is in a list of allowed paths for file storage.

  @type path: string
  @param path: Path to check
  @type allowed: list
  @param allowed: List of allowed paths
  @raise errors.FileStoragePathError: If the path is not allowed

  """
  if not os.path.isabs(path):
    raise errors.FileStoragePathError("File storage path must be absolute,"
                                      " got '%s'" % path)

  for folder in allowed:
    if not os.path.isabs(folder):
      logging.info("Ignoring relative path '%s' for file storage", folder)
    elif utils.IsBelowDir(folder, path):
      # the path lives inside one of the allowed directories
      return

  raise errors.FileStoragePathError("Path '%s' is not acceptable for file"
                                    " storage" % path)
180

    
181

    
182
def _LoadAllowedFileStoragePaths(filename):
  """Loads file containing allowed file storage paths.

  @rtype: list
  @return: List of allowed paths (can be an empty list)

  """
  try:
    contents = utils.ReadFile(filename)
  except EnvironmentError:
    # a missing or unreadable file simply means no paths are allowed
    return []
  return utils.FilterEmptyLinesAndComments(contents)
195

    
196

    
197
def CheckFileStoragePath(path, _filename=pathutils.FILE_STORAGE_PATHS_FILE):
  """Checks if a path is allowed for file storage.

  @type path: string
  @param path: Path to check
  @raise errors.FileStoragePathError: If the path is not allowed

  """
  allowed_paths = _LoadAllowedFileStoragePaths(_filename)

  # reject well-known bad prefixes first
  if _ComputeWrongFileStoragePaths([path]):
    raise errors.FileStoragePathError("Path '%s' uses a forbidden prefix" %
                                      path)

  _CheckFileStoragePath(path, allowed_paths)
212

    
213

    
214
class BlockDev(object):
  """Block device abstract class.

  A block device can be in the following states:
    - not existing on the system, and by `Create()` it goes into:
    - existing but not setup/not active, and by `Assemble()` goes into:
    - active read-write and by `Open()` it goes into
    - online (=used, or ready for use)

  A device can also be online but read-only, however we are not using
  the readonly state (LV has it, if needed in the future) and we are
  usually looking at this like at a stack, so it's easier to
  conceptualise the transition from not-existing to online and back
  like a linear one.

  The many different states of the device are due to the fact that we
  need to cover many device types:
    - logical volumes are created, lvchange -a y $lv, and used
    - drbd devices are attached to a local disk/remote peer and made primary

  A block device is identified by three items:
    - the /dev path of the device (dynamic)
    - a unique ID of the device (static)
    - it's major/minor pair (dynamic)

  Not all devices implement both the first two as distinct items. LVM
  logical volumes have their unique ID (the pair volume group, logical
  volume name) in a 1-to-1 relation to the dev path. For DRBD devices,
  the /dev path is again dynamic and the unique id is the pair (host1,
  dev1), (host2, dev2).

  You can get to a device in two ways:
    - creating the (real) device, which returns you
      an attached instance (lvcreate)
    - attaching of a python instance to an existing (real) device

  The second point, the attachement to a device, is different
  depending on whether the device is assembled or not. At init() time,
  we search for a device with the same unique_id as us. If found,
  good. It also means that the device is already assembled. If not,
  after assembly we'll have our correct major/minor.

  """
  def __init__(self, unique_id, children, size, params):
    """Initializes the common block device state.

    @param unique_id: static device identifier (type depends on subclass)
    @param children: list of child BlockDev instances (or None)
    @param size: device size in MiB (see Grow/Create usage in subclasses)
    @param params: dictionary of LD-level disk parameters

    """
    self._children = children
    self.dev_path = None    # /dev path, filled in once attached
    self.unique_id = unique_id
    self.major = None       # kernel major number, filled in once attached
    self.minor = None       # kernel minor number, filled in once attached
    self.attached = False   # whether a matching real device was found
    self.size = size
    self.params = params

  def Assemble(self):
    """Assemble the device from its components.

    Implementations of this method by child classes must ensure that:
      - after the device has been assembled, it knows its major/minor
        numbers; this allows other devices (usually parents) to probe
        correctly for their children
      - calling this method on an existing, in-use device is safe
      - if the device is already configured (and in an OK state),
        this method is idempotent

    """
    pass

  def Attach(self):
    """Find a device which matches our config and attach to it.

    """
    raise NotImplementedError

  def Close(self):
    """Notifies that the device will no longer be used for I/O.

    """
    raise NotImplementedError

  @classmethod
  def Create(cls, unique_id, children, size, params, excl_stor):
    """Create the device.

    If the device cannot be created, it will return None
    instead. Error messages go to the logging system.

    Note that for some devices, the unique_id is used, and for other,
    the children. The idea is that these two, taken together, are
    enough for both creation and assembly (later).

    """
    raise NotImplementedError

  def Remove(self):
    """Remove this device.

    This makes sense only for some of the device types: LV and file
    storage. Also note that if the device can't attach, the removal
    can't be completed.

    """
    raise NotImplementedError

  def Rename(self, new_id):
    """Rename this device.

    This may or may not make sense for a given device type.

    """
    raise NotImplementedError

  def Open(self, force=False):
    """Make the device ready for use.

    This makes the device ready for I/O. For now, just the DRBD
    devices need this.

    The force parameter signifies that if the device has any kind of
    --force thing, it should be used, we know what we are doing.

    """
    raise NotImplementedError

  def Shutdown(self):
    """Shut down the device, freeing its children.

    This undoes the `Assemble()` work, except for the child
    assembling; as such, the children on the device are still
    assembled after this call.

    """
    raise NotImplementedError

  def SetSyncParams(self, params):
    """Adjust the synchronization parameters of the mirror.

    In case this is not a mirroring device, this is no-op.

    @param params: dictionary of LD level disk parameters related to the
    synchronization.
    @rtype: list
    @return: a list of error messages, emitted both by the current node and by
    children. An empty list means no errors.

    """
    result = []
    if self._children:
      for child in self._children:
        result.extend(child.SetSyncParams(params))
    return result

  def PauseResumeSync(self, pause):
    """Pause/Resume the sync of the mirror.

    In case this is not a mirroring device, this is no-op.

    @param pause: Whether to pause or resume

    """
    # a single failing child makes the overall result False
    result = True
    if self._children:
      for child in self._children:
        result = result and child.PauseResumeSync(pause)
    return result

  def GetSyncStatus(self):
    """Returns the sync status of the device.

    If this device is a mirroring device, this function returns the
    status of the mirror.

    If sync_percent is None, it means the device is not syncing.

    If estimated_time is None, it means we can't estimate
    the time needed, otherwise it's the time left in seconds.

    If is_degraded is True, it means the device is missing
    redundancy. This is usually a sign that something went wrong in
    the device setup, if sync_percent is None.

    The ldisk parameter represents the degradation of the local
    data. This is only valid for some devices, the rest will always
    return False (not degraded).

    @rtype: objects.BlockDevStatus

    """
    # base class: report a healthy, non-syncing device
    return objects.BlockDevStatus(dev_path=self.dev_path,
                                  major=self.major,
                                  minor=self.minor,
                                  sync_percent=None,
                                  estimated_time=None,
                                  is_degraded=False,
                                  ldisk_status=constants.LDS_OKAY)

  def CombinedSyncStatus(self):
    """Calculate the mirror status recursively for our children.

    The return value is the same as for `GetSyncStatus()` except the
    minimum percent and maximum time are calculated across our
    children.

    @rtype: objects.BlockDevStatus

    """
    status = self.GetSyncStatus()

    min_percent = status.sync_percent
    max_time = status.estimated_time
    is_degraded = status.is_degraded
    ldisk_status = status.ldisk_status

    if self._children:
      for child in self._children:
        child_status = child.GetSyncStatus()

        # a child without a sync percentage doesn't lower the minimum
        if min_percent is None:
          min_percent = child_status.sync_percent
        elif child_status.sync_percent is not None:
          min_percent = min(min_percent, child_status.sync_percent)

        # a child without an estimate doesn't raise the maximum
        if max_time is None:
          max_time = child_status.estimated_time
        elif child_status.estimated_time is not None:
          max_time = max(max_time, child_status.estimated_time)

        is_degraded = is_degraded or child_status.is_degraded

        # ldisk statuses are ordered so that max() yields the worst one
        if ldisk_status is None:
          ldisk_status = child_status.ldisk_status
        elif child_status.ldisk_status is not None:
          ldisk_status = max(ldisk_status, child_status.ldisk_status)

    return objects.BlockDevStatus(dev_path=self.dev_path,
                                  major=self.major,
                                  minor=self.minor,
                                  sync_percent=min_percent,
                                  estimated_time=max_time,
                                  is_degraded=is_degraded,
                                  ldisk_status=ldisk_status)

  def SetInfo(self, text):
    """Update metadata with info text.

    Only supported for some device types.

    """
    for child in self._children:
      child.SetInfo(text)

  def Grow(self, amount, dryrun, backingstore):
    """Grow the block device.

    @type amount: integer
    @param amount: the amount (in mebibytes) to grow with
    @type dryrun: boolean
    @param dryrun: whether to execute the operation in simulation mode
        only, without actually increasing the size
    @param backingstore: whether to execute the operation on backing storage
        only, or on "logical" storage only; e.g. DRBD is logical storage,
        whereas LVM, file, RBD are backing storage

    """
    raise NotImplementedError

  def GetActualSize(self):
    """Return the actual disk size.

    @note: the device needs to be active when this is called

    """
    assert self.attached, "BlockDevice not attached in GetActualSize()"
    # "blockdev --getsize64" reports the device size in bytes
    result = utils.RunCmd(["blockdev", "--getsize64", self.dev_path])
    if result.failed:
      _ThrowError("blockdev failed (%s): %s",
                  result.fail_reason, result.output)
    try:
      sz = int(result.output.strip())
    except (ValueError, TypeError), err:
      _ThrowError("Failed to parse blockdev output: %s", str(err))
    return sz

  def __repr__(self):
    return ("<%s: unique_id: %s, children: %s, %s:%s, %s>" %
            (self.__class__, self.unique_id, self._children,
             self.major, self.minor, self.dev_path))
500

    
501

    
502
class LogicalVolume(BlockDev):
  """Logical Volume block device.

  """
  # characters accepted in VG/LV names (see _ValidateName, based on lvm(8))
  _VALID_NAME_RE = re.compile("^[a-zA-Z0-9+_.-]*$")
  # names reserved by LVM itself
  _INVALID_NAMES = compat.UniqueFrozenset([".", "..", "snapshot", "pvmove"])
  # substrings used by LVM for internal mirror volumes
  _INVALID_SUBSTRINGS = compat.UniqueFrozenset(["_mlog", "_mimage"])
509

    
510
  def __init__(self, unique_id, children, size, params):
    """Attaches to a LV device.

    The unique_id is a tuple (vg_name, lv_name)

    @raise ValueError: if unique_id is not a two-element tuple/list

    """
    super(LogicalVolume, self).__init__(unique_id, children, size, params)
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
      raise ValueError("Invalid configuration data %s" % str(unique_id))
    self._vg_name, self._lv_name = unique_id
    self._ValidateName(self._vg_name)
    self._ValidateName(self._lv_name)
    self.dev_path = utils.PathJoin("/dev", self._vg_name, self._lv_name)
    self._degraded = True  # assumed degraded until Attach() proves otherwise
    # these are filled in by Attach() when the LV exists and is active
    self.major = self.minor = self.pe_size = self.stripe_count = None
    self.Attach()
526

    
527
  @staticmethod
  def _GetStdPvSize(pvs_info):
    """Return the standard PV size (used with exclusive storage).

    @param pvs_info: list of objects.LvmPvInfo, cannot be empty
    @rtype: float
    @return: size in MiB

    """
    assert len(pvs_info) > 0
    # leave room for partitioning margin and reserved space
    overhead = 1 + constants.PART_MARGIN + constants.PART_RESERVED
    return min([pv.size for pv in pvs_info]) / overhead
539

    
540
  @staticmethod
  def _ComputeNumPvs(size, pvs_info):
    """Compute the number of PVs needed for an LV (with exclusive storage).

    @type size: float
    @param size: LV size
    @param pvs_info: list of objects.LvmPvInfo, cannot be empty
    @rtype: integer
    @return: number of PVs needed
    """
    assert len(pvs_info) > 0
    needed = float(size) / float(LogicalVolume._GetStdPvSize(pvs_info))
    return int(math.ceil(needed))
553

    
554
  @staticmethod
  def _GetEmptyPvNames(pvs_info, max_pvs=None):
    """Return a list of empty PVs, by name.

    """
    candidates = [pv for pv in pvs_info if objects.LvmPvInfo.IsEmpty(pv)]
    if max_pvs is not None:
      candidates = candidates[:max_pvs]
    return [pv.name for pv in candidates]
563

    
564
  @classmethod
  def Create(cls, unique_id, children, size, params, excl_stor):
    """Create a new logical volume.

    @type unique_id: tuple
    @param unique_id: (vg_name, lv_name) pair for the new volume
    @param children: passed through to the constructor
    @param size: volume size in MiB (passed to lvcreate -L%dm)
    @param params: LD-level disk parameters; only constants.LDP_STRIPES
        is read here
    @type excl_stor: boolean
    @param excl_stor: whether exclusive_storage is enabled
    @rtype: LogicalVolume
    @raise errors.ProgrammerError: if unique_id is malformed

    """
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
      raise errors.ProgrammerError("Invalid configuration data %s" %
                                   str(unique_id))
    vg_name, lv_name = unique_id
    cls._ValidateName(vg_name)
    cls._ValidateName(lv_name)
    pvs_info = cls.GetPVInfo([vg_name])
    if not pvs_info:
      if excl_stor:
        msg = "No (empty) PVs found"
      else:
        msg = "Can't compute PV info for vg %s" % vg_name
      _ThrowError(msg)
    # prefer the PVs with the most free space
    pvs_info.sort(key=(lambda pv: pv.free), reverse=True)

    pvlist = [pv.name for pv in pvs_info]
    if compat.any(":" in v for v in pvlist):
      _ThrowError("Some of your PVs have the invalid character ':' in their"
                  " name, this is not supported - please filter them out"
                  " in lvm.conf using either 'filter' or 'preferred_names'")

    current_pvs = len(pvlist)
    desired_stripes = params[constants.LDP_STRIPES]
    # cannot stripe wider than the number of PVs available
    stripes = min(current_pvs, desired_stripes)

    if excl_stor:
      err_msgs = utils.LvmExclusiveCheckNodePvs(pvs_info)
      if err_msgs:
        for m in err_msgs:
          logging.warning(m)
      # with exclusive storage, only whole empty PVs may be used
      req_pvs = cls._ComputeNumPvs(size, pvs_info)
      pvlist = cls._GetEmptyPvNames(pvs_info, req_pvs)
      current_pvs = len(pvlist)
      if current_pvs < req_pvs:
        _ThrowError("Not enough empty PVs to create a disk of %d MB:"
                    " %d available, %d needed", size, current_pvs, req_pvs)
      assert current_pvs == len(pvlist)
      if stripes > current_pvs:
        # No warning issued for this, as it's no surprise
        stripes = current_pvs

    else:
      if stripes < desired_stripes:
        logging.warning("Could not use %d stripes for VG %s, as only %d PVs are"
                        " available.", desired_stripes, vg_name, current_pvs)
      free_size = sum([pv.free for pv in pvs_info])
      # The size constraint should have been checked from the master before
      # calling the create function.
      if free_size < size:
        _ThrowError("Not enough free space: required %s,"
                    " available %s", size, free_size)

    # If the free space is not well distributed, we won't be able to
    # create an optimally-striped volume; in that case, we want to try
    # with N, N-1, ..., 2, and finally 1 (non-stripped) number of
    # stripes
    cmd = ["lvcreate", "-L%dm" % size, "-n%s" % lv_name]
    for stripes_arg in range(stripes, 0, -1):
      result = utils.RunCmd(cmd + ["-i%d" % stripes_arg] + [vg_name] + pvlist)
      if not result.failed:
        break
    if result.failed:
      _ThrowError("LV create failed (%s): %s",
                  result.fail_reason, result.output)
    return LogicalVolume(unique_id, children, size, params)
634

    
635
  @staticmethod
  def _GetVolumeInfo(lvm_cmd, fields):
    """Returns LVM volume information using lvm_cmd.

    @param lvm_cmd: Should be one of "pvs", "vgs" or "lvs"
    @param fields: Fields to return
    @return: A list of dicts each with the parsed fields

    """
    if not fields:
      raise errors.ProgrammerError("No fields specified")

    sep = "|"
    cmd = [lvm_cmd, "--noheadings", "--nosuffix", "--units=m", "--unbuffered",
           "--separator=%s" % sep, "-o%s" % ",".join(fields)]

    result = utils.RunCmd(cmd)
    if result.failed:
      raise errors.CommandError("Can't get the volume information: %s - %s" %
                                (result.fail_reason, result.output))

    parsed = []
    for line in result.stdout.splitlines():
      values = line.strip().split(sep)
      # every requested field must be present on every output line
      if len(values) != len(fields):
        raise errors.CommandError("Can't parse %s output: line '%s'" %
                                  (lvm_cmd, line))
      parsed.append(values)

    return parsed
667

    
668
  @classmethod
669
  def GetPVInfo(cls, vg_names, filter_allocatable=True):
670
    """Get the free space info for PVs in a volume group.
671

672
    @param vg_names: list of volume group names, if empty all will be returned
673
    @param filter_allocatable: whether to skip over unallocatable PVs
674

675
    @rtype: list
676
    @return: list of objects.LvmPvInfo objects
677

678
    """
679
    try:
680
      info = cls._GetVolumeInfo("pvs", ["pv_name", "vg_name", "pv_free",
681
                                        "pv_attr", "pv_size"])
682
    except errors.GenericError, err:
683
      logging.error("Can't get PV information: %s", err)
684
      return None
685

    
686
    data = []
687
    for (pv_name, vg_name, pv_free, pv_attr, pv_size) in info:
688
      # (possibly) skip over pvs which are not allocatable
689
      if filter_allocatable and pv_attr[0] != "a":
690
        continue
691
      # (possibly) skip over pvs which are not in the right volume group(s)
692
      if vg_names and vg_name not in vg_names:
693
        continue
694
      pvi = objects.LvmPvInfo(name=pv_name, vg_name=vg_name,
695
                              size=float(pv_size), free=float(pv_free),
696
                              attributes=pv_attr)
697
      data.append(pvi)
698

    
699
    return data
700

    
701
  @classmethod
702
  def GetVGInfo(cls, vg_names, excl_stor, filter_readonly=True):
703
    """Get the free space info for specific VGs.
704

705
    @param vg_names: list of volume group names, if empty all will be returned
706
    @param excl_stor: whether exclusive_storage is enabled
707
    @param filter_readonly: whether to skip over readonly VGs
708

709
    @rtype: list
710
    @return: list of tuples (free_space, total_size, name) with free_space in
711
             MiB
712

713
    """
714
    try:
715
      info = cls._GetVolumeInfo("vgs", ["vg_name", "vg_free", "vg_attr",
716
                                        "vg_size"])
717
    except errors.GenericError, err:
718
      logging.error("Can't get VG information: %s", err)
719
      return None
720

    
721
    data = []
722
    for vg_name, vg_free, vg_attr, vg_size in info:
723
      # (possibly) skip over vgs which are not writable
724
      if filter_readonly and vg_attr[0] == "r":
725
        continue
726
      # (possibly) skip over vgs which are not in the right volume group(s)
727
      if vg_names and vg_name not in vg_names:
728
        continue
729
      data.append((float(vg_free), float(vg_size), vg_name))
730

    
731
    return data
732

    
733
  @classmethod
  def _ValidateName(cls, name):
    """Validates that a given name is valid as VG or LV name.

    The list of valid characters and restricted names is taken out of
    the lvm(8) manpage, with the simplification that we enforce both
    VG and LV restrictions on the names.

    """
    acceptable = (cls._VALID_NAME_RE.match(name) and
                  name not in cls._INVALID_NAMES and
                  not compat.any(substring in name
                                 for substring in cls._INVALID_SUBSTRINGS))
    if not acceptable:
      _ThrowError("Invalid LVM name '%s'", name)
746

    
747
  def Remove(self):
    """Remove this logical volume.

    """
    if not self.minor and not self.Attach():
      # the LV does not exist, nothing to remove
      return
    result = utils.RunCmd(["lvremove", "-f",
                           "%s/%s" % (self._vg_name, self._lv_name)])
    if result.failed:
      _ThrowError("Can't lvremove: %s - %s", result.fail_reason, result.output)
758

    
759
  def Rename(self, new_id):
    """Rename this logical volume.

    @type new_id: tuple
    @param new_id: the new logical id, a (vg_name, lv_name) pair; the
        volume group must be unchanged, as LVs cannot be moved across
        volume groups here
    @raise errors.ProgrammerError: if new_id is malformed or names a
        different volume group

    """
    if not isinstance(new_id, (tuple, list)) or len(new_id) != 2:
      raise errors.ProgrammerError("Invalid new logical id '%s'" % new_id)
    new_vg, new_name = new_id
    if new_vg != self._vg_name:
      # fixed duplicated "to to" typo in this error message
      raise errors.ProgrammerError("Can't move a logical volume across"
                                   " volume groups (from %s to %s)" %
                                   (self._vg_name, new_vg))
    result = utils.RunCmd(["lvrename", new_vg, self._lv_name, new_name])
    if result.failed:
      _ThrowError("Failed to rename the logical volume: %s", result.output)
    # keep the cached name and device path in sync with the new LV name
    self._lv_name = new_name
    self.dev_path = utils.PathJoin("/dev", self._vg_name, self._lv_name)
775

    
776
  def Attach(self):
    """Attach to an existing LV.

    This method will try to see if an existing and active LV exists
    which matches our name. If so, its major/minor will be
    recorded.

    @rtype: boolean
    @return: True if the LV was found and its fields parsed

    """
    self.attached = False
    result = utils.RunCmd(["lvs", "--noheadings", "--separator=,",
                           "--units=m", "--nosuffix",
                           "-olv_attr,lv_kernel_major,lv_kernel_minor,"
                           "vg_extent_size,stripes", self.dev_path])
    if result.failed:
      logging.error("Can't find LV %s: %s, %s",
                    self.dev_path, result.fail_reason, result.output)
      return False
    # the output can (and will) have multiple lines for multi-segment
    # LVs, as the 'stripes' parameter is a segment one, so we take
    # only the last entry, which is the one we're interested in; note
    # that with LVM2 anyway the 'stripes' value must be constant
    # across segments, so this is a no-op actually
    out = result.stdout.splitlines()
    if not out: # totally empty result? splitlines() returns at least
                # one line for any non-empty string
      logging.error("Can't parse LVS output, no lines? Got '%s'", str(out))
      return False
    out = out[-1].strip().rstrip(",")
    out = out.split(",")
    if len(out) != 5:
      logging.error("Can't parse LVS output, len(%s) != 5", str(out))
      return False

    status, major, minor, pe_size, stripes = out
    if len(status) < 6:
      logging.error("lvs lv_attr is not at least 6 characters (%s)", status)
      return False

    # NOTE(review): unlike the parse failures below, a major/minor parse
    # error only logs and falls through, leaving major/minor as the raw
    # strings; presumably deliberate (inactive LVs?) - confirm
    try:
      major = int(major)
      minor = int(minor)
    except (TypeError, ValueError), err:
      logging.error("lvs major/minor cannot be parsed: %s", str(err))

    try:
      pe_size = int(float(pe_size))
    except (TypeError, ValueError), err:
      logging.error("Can't parse vg extent size: %s", err)
      return False

    try:
      stripes = int(stripes)
    except (TypeError, ValueError), err:
      logging.error("Can't parse the number of stripes: %s", err)
      return False

    self.major = major
    self.minor = minor
    self.pe_size = pe_size
    self.stripe_count = stripes
    self._degraded = status[0] == "v" # virtual volume, i.e. doesn't backing
                                      # storage
    self.attached = True
    return True
840

    
841
  def Assemble(self):
    """Assemble the device.

    We always run `lvchange -ay` on the LV to ensure it's active before
    use, as there were cases when xenvg was not active after boot
    (also possibly after disk issues).

    """
    activate = utils.RunCmd(["lvchange", "-ay", self.dev_path])
    if activate.failed:
      _ThrowError("Can't activate lv %s: %s", self.dev_path, activate.output)
852

    
853
  def Shutdown(self):
    """Shutdown the device.

    This is a no-op for the LV device type, as we don't deactivate the
    volumes on shutdown.

    """
861

    
862
  def GetSyncStatus(self):
    """Returns the sync status of the device.

    For logical volumes, sync_percent and estimated_time are always
    None (no recovery in progress, as we don't handle the mirrored LV
    case). The is_degraded parameter is the inverse of the ldisk
    parameter.

    For the ldisk parameter, we check if the logical volume has the
    'virtual' type, which means it's not backed by existing storage
    anymore (read from it return I/O error). This happens after a
    physical disk failure and subsequent 'vgreduce --removemissing' on
    the volume group.

    The status was already read in Attach, so we just return it.

    @rtype: objects.BlockDevStatus

    """
    ldisk_status = (constants.LDS_FAULTY if self._degraded
                    else constants.LDS_OKAY)

    return objects.BlockDevStatus(dev_path=self.dev_path,
                                  major=self.major,
                                  minor=self.minor,
                                  sync_percent=None,
                                  estimated_time=None,
                                  is_degraded=self._degraded,
                                  ldisk_status=ldisk_status)
896

    
897
  def Open(self, force=False):
    """Make the device ready for I/O.

    An active logical volume is always usable, so nothing needs to be
    done here.

    """
  def Close(self):
    """Notifies that the device will no longer be used for I/O.

    Nothing to release for a logical volume, hence a no-op.

    """
  def Snapshot(self, size):
    """Create a snapshot copy of an lvm block device.

    The snapshot is named after the volume with a ".snap" suffix; any
    stale snapshot of that name is removed first (failures ignored) and
    the volume group is checked for enough free space before creation.

    @returns: tuple (vg, lv)

    """
    snap_name = "%s.snap" % self._lv_name

    # drop a stale snapshot, if one is left over; errors are ignored
    old_snap = LogicalVolume((self._vg_name, snap_name), None, size,
                             self.params)
    _IgnoreError(old_snap.Remove)

    vg_info = self.GetVGInfo([self._vg_name], False)
    if not vg_info:
      _ThrowError("Can't compute VG info for vg %s", self._vg_name)
    free_size, _, _ = vg_info[0]
    if free_size < size:
      _ThrowError("Not enough free space: required %s,"
                  " available %s", size, free_size)

    _CheckResult(utils.RunCmd(["lvcreate", "-L%dm" % size, "-s",
                               "-n%s" % snap_name, self.dev_path]))

    return (self._vg_name, snap_name)
  def _RemoveOldInfo(self):
    """Strip all existing tags from the logical volume.

    """
    lvs_result = utils.RunCmd(["lvs", "-o", "tags", "--noheadings",
                               "--nosuffix", self.dev_path])
    _CheckResult(lvs_result)

    tag_text = lvs_result.stdout.strip()
    if not tag_text:
      return
    # tags are reported comma-separated; delete them one by one
    for old_tag in tag_text.split(","):
      _CheckResult(utils.RunCmd(["lvchange", "--deltag",
                                 old_tag.strip(), self.dev_path]))
  def SetInfo(self, text):
    """Record the given info text as an LV tag.

    The base-class bookkeeping runs first, then all old tags are
    removed and the sanitized, truncated text is added as the new tag.

    """
    BlockDev.SetInfo(self, text)
    self._RemoveOldInfo()

    # LVM tags allow only a restricted alphabet; mangle the rest
    tag = re.sub("^[^A-Za-z0-9_+.]", "_", text)
    tag = re.sub("[^-A-Za-z0-9_+.]", "_", tag)
    # ... and at most 128 characters
    tag = tag[:128]

    _CheckResult(utils.RunCmd(["lvchange", "--addtag", tag, self.dev_path]))
  def Grow(self, amount, dryrun, backingstore):
    """Grow the logical volume by C{amount} mebibytes.

    The requested amount is rounded up to a whole multiple of the full
    stripe size, and several allocation policies are attempted in turn,
    from the most to the least constrained one.

    """
    if not backingstore:
      return
    if self.pe_size is None or self.stripe_count is None:
      if not self.Attach():
        _ThrowError("Can't attach to LV during Grow()")

    # round the amount up to a multiple of (PE size * stripe count)
    full_stripe_size = self.pe_size * self.stripe_count
    remainder = amount % full_stripe_size
    if remainder:
      amount += full_stripe_size - remainder

    base_cmd = ["lvextend", "-L", "+%dm" % amount]
    if dryrun:
      base_cmd.append("--test")

    # we try multiple algorithms since the 'best' ones might not have
    # space available in the right place, but later ones might (since
    # they have less constraints); also note that only recent LVM
    # supports 'cling'
    for policy in ("contiguous", "cling", "normal"):
      result = utils.RunCmd(base_cmd + ["--alloc", policy, self.dev_path])
      if not result.failed:
        return
    _ThrowError("Can't grow LV %s: %s", self.dev_path, result.output)
class DRBD8Status(object):
  """A DRBD status representation class.

  Note that this doesn't support unconfigured devices (cs:Unconfigured).

  """
  # matches an unconfigured minor line in /proc/drbd
  UNCONF_RE = re.compile(r"\s*[0-9]+:\s*cs:Unconfigured$")
  # matches a configured minor line: connection state, local/remote
  # roles ("st:" on older DRBD, "ro:" on newer) and the two disk states
  LINE_RE = re.compile(r"\s*[0-9]+:\s*cs:(\S+)\s+(?:st|ro):([^/]+)/(\S+)"
                       "\s+ds:([^/]+)/(\S+)\s+.*$")
  # matches the sync-progress part: percentage and estimated finish time
  SYNC_RE = re.compile(r"^.*\ssync'ed:\s*([0-9.]+)%.*"
                       # Due to a bug in drbd in the kernel, introduced in
                       # commit 4b0715f096 (still unfixed as of 2011-08-22)
                       "(?:\s|M)"
                       "finish: ([0-9]+):([0-9]+):([0-9]+)\s.*$")

  # connection state (cs:) values
  CS_UNCONFIGURED = "Unconfigured"
  CS_STANDALONE = "StandAlone"
  CS_WFCONNECTION = "WFConnection"
  CS_WFREPORTPARAMS = "WFReportParams"
  CS_CONNECTED = "Connected"
  CS_STARTINGSYNCS = "StartingSyncS"
  CS_STARTINGSYNCT = "StartingSyncT"
  CS_WFBITMAPS = "WFBitMapS"
  CS_WFBITMAPT = "WFBitMapT"
  CS_WFSYNCUUID = "WFSyncUUID"
  CS_SYNCSOURCE = "SyncSource"
  CS_SYNCTARGET = "SyncTarget"
  CS_PAUSEDSYNCS = "PausedSyncS"
  CS_PAUSEDSYNCT = "PausedSyncT"
  # connection states that denote an ongoing (or starting) resync
  CSET_SYNC = compat.UniqueFrozenset([
    CS_WFREPORTPARAMS,
    CS_STARTINGSYNCS,
    CS_STARTINGSYNCT,
    CS_WFBITMAPS,
    CS_WFBITMAPT,
    CS_WFSYNCUUID,
    CS_SYNCSOURCE,
    CS_SYNCTARGET,
    CS_PAUSEDSYNCS,
    CS_PAUSEDSYNCT,
    ])

  # disk state (ds:) values
  DS_DISKLESS = "Diskless"
  DS_ATTACHING = "Attaching" # transient state
  DS_FAILED = "Failed" # transient state, next: diskless
  DS_NEGOTIATING = "Negotiating" # transient state
  DS_INCONSISTENT = "Inconsistent" # while syncing or after creation
  DS_OUTDATED = "Outdated"
  DS_DUNKNOWN = "DUnknown" # shown for peer disk when not connected
  DS_CONSISTENT = "Consistent"
  DS_UPTODATE = "UpToDate" # normal state

  # role (ro:/st:) values
  RO_PRIMARY = "Primary"
  RO_SECONDARY = "Secondary"
  RO_UNKNOWN = "Unknown"

  def __init__(self, procline):
    """Parse one (joined) minor line from /proc/drbd.

    @raise errors.BlockDeviceError: if the line cannot be parsed

    """
    u = self.UNCONF_RE.match(procline)
    if u:
      self.cstatus = self.CS_UNCONFIGURED
      self.lrole = self.rrole = self.ldisk = self.rdisk = None
    else:
      m = self.LINE_RE.match(procline)
      if not m:
        raise errors.BlockDeviceError("Can't parse input data '%s'" % procline)
      self.cstatus = m.group(1)
      self.lrole = m.group(2)
      self.rrole = m.group(3)
      self.ldisk = m.group(4)
      self.rdisk = m.group(5)

    # end reading of data from the LINE_RE or UNCONF_RE

    # derive the convenience boolean flags from the raw fields
    self.is_standalone = self.cstatus == self.CS_STANDALONE
    self.is_wfconn = self.cstatus == self.CS_WFCONNECTION
    self.is_connected = self.cstatus == self.CS_CONNECTED
    self.is_primary = self.lrole == self.RO_PRIMARY
    self.is_secondary = self.lrole == self.RO_SECONDARY
    self.peer_primary = self.rrole == self.RO_PRIMARY
    self.peer_secondary = self.rrole == self.RO_SECONDARY
    self.both_primary = self.is_primary and self.peer_primary
    self.both_secondary = self.is_secondary and self.peer_secondary

    self.is_diskless = self.ldisk == self.DS_DISKLESS
    self.is_disk_uptodate = self.ldisk == self.DS_UPTODATE

    self.is_in_resync = self.cstatus in self.CSET_SYNC
    self.is_in_use = self.cstatus != self.CS_UNCONFIGURED

    # try to extract sync progress, if present on this line
    m = self.SYNC_RE.match(procline)
    if m:
      self.sync_percent = float(m.group(1))
      hours = int(m.group(2))
      minutes = int(m.group(3))
      seconds = int(m.group(4))
      self.est_time = hours * 3600 + minutes * 60 + seconds
    else:
      # we have (in this if branch) no percent information, but if
      # we're resyncing we need to 'fake' a sync percent information,
      # as this is how cmdlib determines if it makes sense to wait for
      # resyncing or not
      if self.is_in_resync:
        self.sync_percent = 0
      else:
        self.sync_percent = None
      self.est_time = None
class BaseDRBD(BlockDev): # pylint: disable=W0223
  """Base DRBD class.

  This class contains a few bits of common functionality between the
  0.7 and 8.x versions of DRBD.

  """
  # parses the "version:" header line of /proc/drbd
  _VERSION_RE = re.compile(r"^version: (\d+)\.(\d+)\.(\d+)(?:\.\d+)?"
                           r" \(api:(\d+)/proto:(\d+)(?:-(\d+))?\)")
  # any configured minor line; group 1 is the minor, group 2 the cs: value
  _VALID_LINE_RE = re.compile("^ *([0-9]+): cs:([^ ]+).*$")
  # an unconfigured minor line
  _UNUSED_LINE_RE = re.compile("^ *([0-9]+): cs:Unconfigured$")

  # device major number assigned to the DRBD kernel driver
  _DRBD_MAJOR = 147
  _ST_UNCONFIGURED = "Unconfigured"
  _ST_WFCONNECTION = "WFConnection"
  _ST_CONNECTED = "Connected"

  _STATUS_FILE = constants.DRBD_STATUS_FILE
  _USERMODE_HELPER_FILE = "/sys/module/drbd/parameters/usermode_helper"

  @staticmethod
  def _GetProcData(filename=_STATUS_FILE):
    """Return data from /proc/drbd.

    @return: the file's contents as a list of lines
    @raise errors.BlockDeviceError: if the file is missing (module not
        loaded), unreadable or empty

    """
    try:
      data = utils.ReadFile(filename).splitlines()
    except EnvironmentError, err:
      if err.errno == errno.ENOENT:
        _ThrowError("The file %s cannot be opened, check if the module"
                    " is loaded (%s)", filename, str(err))
      else:
        _ThrowError("Can't read the DRBD proc file %s: %s", filename, str(err))
    if not data:
      _ThrowError("Can't read any data from %s", filename)
    return data

  @classmethod
  def _MassageProcData(cls, data):
    """Transform the output of _GetProdData into a nicer form.

    Continuation lines (those not starting with a "minor:" prefix) are
    joined onto the preceding minor's line.

    @return: a dictionary of minor: joined lines from /proc/drbd
        for that minor

    """
    results = {}
    old_minor = old_line = None
    for line in data:
      if not line: # completely empty lines, as can be returned by drbd8.0+
        continue
      lresult = cls._VALID_LINE_RE.match(line)
      if lresult is not None:
        # a new minor starts here; flush the previously accumulated one
        if old_minor is not None:
          results[old_minor] = old_line
        old_minor = int(lresult.group(1))
        old_line = line
      else:
        # continuation line: append to the current minor's data
        if old_minor is not None:
          old_line += " " + line.strip()
    # add last line
    if old_minor is not None:
      results[old_minor] = old_line
    return results

  @classmethod
  def _GetVersion(cls, proc_data):
    """Return the DRBD version.

    This will return a dict with keys:
      - k_major
      - k_minor
      - k_point
      - api
      - proto
      - proto2 (only on drbd > 8.2.X)

    @raise errors.BlockDeviceError: if the version header can't be parsed

    """
    first_line = proc_data[0].strip()
    version = cls._VERSION_RE.match(first_line)
    if not version:
      raise errors.BlockDeviceError("Can't parse DRBD version from '%s'" %
                                    first_line)

    values = version.groups()
    retval = {
      "k_major": int(values[0]),
      "k_minor": int(values[1]),
      "k_point": int(values[2]),
      "api": int(values[3]),
      "proto": int(values[4]),
      }
    # the upper end of the protocol range is optional in the header
    if values[5] is not None:
      retval["proto2"] = values[5]

    return retval

  @staticmethod
  def GetUsermodeHelper(filename=_USERMODE_HELPER_FILE):
    """Returns DRBD usermode_helper currently set.

    @raise errors.BlockDeviceError: if the sysfs file is missing
        (module not loaded), unreadable or empty

    """
    try:
      helper = utils.ReadFile(filename).splitlines()[0]
    except EnvironmentError, err:
      if err.errno == errno.ENOENT:
        _ThrowError("The file %s cannot be opened, check if the module"
                    " is loaded (%s)", filename, str(err))
      else:
        _ThrowError("Can't read DRBD helper file %s: %s", filename, str(err))
    if not helper:
      _ThrowError("Can't read any data from %s", filename)
    return helper

  @staticmethod
  def _DevPath(minor):
    """Return the path to a drbd device for a given minor.

    """
    return "/dev/drbd%d" % minor

  @classmethod
  def GetUsedDevs(cls):
    """Compute the list of used DRBD devices.

    @return: dictionary of minor: (state, line) for all configured
        (non-Unconfigured) minors found in /proc/drbd

    """
    data = cls._GetProcData()

    used_devs = {}
    for line in data:
      match = cls._VALID_LINE_RE.match(line)
      if not match:
        continue
      minor = int(match.group(1))
      state = match.group(2)
      if state == cls._ST_UNCONFIGURED:
        continue
      used_devs[minor] = state, line

    return used_devs

  def _SetFromMinor(self, minor):
    """Set our parameters based on the given minor.

    This sets our minor variable and our dev_path.

    @param minor: the minor to attach to, or None to mark the device
        as detached

    """
    if minor is None:
      self.minor = self.dev_path = None
      self.attached = False
    else:
      self.minor = minor
      self.dev_path = self._DevPath(minor)
      self.attached = True

  @staticmethod
  def _CheckMetaSize(meta_device):
    """Check if the given meta device looks like a valid one.

    This currently only checks the size, which must be around
    128MiB.

    @raise errors.BlockDeviceError: if the size cannot be determined or
        is outside the accepted [128MiB, 1GiB] range

    """
    result = utils.RunCmd(["blockdev", "--getsize", meta_device])
    if result.failed:
      _ThrowError("Failed to get device size: %s - %s",
                  result.fail_reason, result.output)
    try:
      sectors = int(result.stdout)
    except (TypeError, ValueError):
      _ThrowError("Invalid output from blockdev: '%s'", result.stdout)
    # blockdev --getsize reports 512-byte sectors
    num_bytes = sectors * 512
    if num_bytes < 128 * 1024 * 1024: # less than 128MiB
      _ThrowError("Meta device too small (%.2fMib)", (num_bytes / 1024 / 1024))
    # the maximum *valid* size of the meta device when living on top
    # of LVM is hard to compute: it depends on the number of stripes
    # and the PE size; e.g. a 2-stripe, 64MB PE will result in a 128MB
    # (normal size), but an eight-stripe 128MB PE will result in a 1GB
    # size meta device; as such, we restrict it to 1GB (a little bit
    # too generous, but making assumptions about PE size is hard)
    if num_bytes > 1024 * 1024 * 1024:
      _ThrowError("Meta device too big (%.2fMiB)", (num_bytes / 1024 / 1024))

  def Rename(self, new_id):
    """Rename a device.

    This is not supported for drbd devices.

    @raise errors.ProgrammerError: always

    """
    raise errors.ProgrammerError("Can't rename a drbd device")
class DRBD8(BaseDRBD):
1296
  """DRBD v8.x block device.
1297

1298
  This implements the local host part of the DRBD device, i.e. it
1299
  doesn't do anything to the supposed peer. If you need a fully
1300
  connected DRBD pair, you need to use this class on both hosts.
1301

1302
  The unique_id for the drbd device is a (local_ip, local_port,
1303
  remote_ip, remote_port, local_minor, secret) tuple, and it must have
1304
  two children: the data device and the meta_device. The meta device
1305
  is checked for valid size and is zeroed on create.
1306

1307
  """
1308
  _MAX_MINORS = 255
1309
  _PARSE_SHOW = None
1310

    
1311
  # timeout constants
1312
  _NET_RECONFIG_TIMEOUT = 60
1313

    
1314
  # command line options for barriers
1315
  _DISABLE_DISK_OPTION = "--no-disk-barrier"  # -a
1316
  _DISABLE_DRAIN_OPTION = "--no-disk-drain"   # -D
1317
  _DISABLE_FLUSH_OPTION = "--no-disk-flushes" # -i
1318
  _DISABLE_META_FLUSH_OPTION = "--no-md-flushes"  # -m
1319

    
1320
  def __init__(self, unique_id, children, size, params):
    """Initialize the DRBD8 device.

    @type unique_id: tuple or list
    @param unique_id: (local_ip, local_port, remote_ip, remote_port,
        local_minor, secret)
    @param children: either an empty list or a pair of
        (data device, meta device)
    @raise ValueError: on invalid configuration data

    """
    # any None child invalidates the pair; fall back to no children
    if children and children.count(None) > 0:
      children = []
    if len(children) not in (0, 2):
      raise ValueError("Invalid configuration data %s" % str(children))
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 6:
      raise ValueError("Invalid configuration data %s" % str(unique_id))
    (self._lhost, self._lport,
     self._rhost, self._rport,
     self._aminor, self._secret) = unique_id
    if children:
      # an unreadable meta device makes the pair useless; drop both
      if not _CanReadDevice(children[1].dev_path):
        logging.info("drbd%s: Ignoring unreadable meta device", self._aminor)
        children = []
    super(DRBD8, self).__init__(unique_id, children, size, params)
    self.major = self._DRBD_MAJOR
    # only DRBD 8.x kernels are supported by this class
    version = self._GetVersion(self._GetProcData())
    if version["k_major"] != 8:
      _ThrowError("Mismatch in DRBD kernel version and requested ganeti"
                  " usage: kernel is %s.%s, ganeti wants 8.x",
                  version["k_major"], version["k_minor"])

    # connecting an endpoint to itself is a configuration error
    if (self._lhost is not None and self._lhost == self._rhost and
        self._lport == self._rport):
      raise ValueError("Invalid configuration data, same local/remote %s" %
                       (unique_id,))
    self.Attach()
  @classmethod
  def _InitMeta(cls, minor, dev_path):
    """Initialize a meta device.

    This will not work if the given minor is in use.

    """
    # Wipe the start of the device before invoking drbdmeta, so that it
    # cannot auto-detect leftover filesystems or similar (see
    # http://code.google.com/p/ganeti/issues/detail?id=182); only the
    # first 128MB of the device matter for the metadata, even if the
    # device itself is bigger.
    wipe_result = utils.RunCmd([constants.DD_CMD,
                                "if=/dev/zero", "of=%s" % dev_path,
                                "bs=1048576", "count=128", "oflag=direct"])
    if wipe_result.failed:
      _ThrowError("Can't wipe the meta device: %s", wipe_result.output)

    md_result = utils.RunCmd(["drbdmeta", "--force", cls._DevPath(minor),
                              "v08", dev_path, "0", "create-md"])
    if md_result.failed:
      _ThrowError("Can't initialize meta device: %s", md_result.output)
  @classmethod
  def _FindUnusedMinor(cls):
    """Find an unused DRBD device.

    This is specific to 8.x as the minors are allocated dynamically,
    so non-existing numbers up to a max minor count are actually free.

    """
    proc_lines = cls._GetProcData()

    in_use = []
    for entry in proc_lines:
      # an explicitly unconfigured minor can be reused directly
      unused = cls._UNUSED_LINE_RE.match(entry)
      if unused:
        return int(unused.group(1))
      valid = cls._VALID_LINE_RE.match(entry)
      if valid:
        in_use.append(int(valid.group(1)))

    if not in_use: # there are no minors in use at all
      return 0
    top = max(in_use)
    if top >= cls._MAX_MINORS:
      logging.error("Error: no free drbd minors!")
      raise errors.BlockDeviceError("Can't find a free DRBD minor")
    return top + 1
  @classmethod
  def _GetShowParser(cls):
    """Return a parser for `drbd show` output.

    This will either create or return an already-created parser for the
    output of the command `drbd show`.

    @return: a pyparsing grammar object, cached in cls._PARSE_SHOW

    """
    if cls._PARSE_SHOW is not None:
      return cls._PARSE_SHOW

    # pyparsing setup
    lbrace = pyp.Literal("{").suppress()
    rbrace = pyp.Literal("}").suppress()
    lbracket = pyp.Literal("[").suppress()
    rbracket = pyp.Literal("]").suppress()
    semi = pyp.Literal(";").suppress()
    colon = pyp.Literal(":").suppress()
    # this also converts the value to an int
    number = pyp.Word(pyp.nums).setParseAction(lambda s, l, t: int(t[0]))

    comment = pyp.Literal("#") + pyp.Optional(pyp.restOfLine)
    defa = pyp.Literal("_is_default").suppress()
    dbl_quote = pyp.Literal('"').suppress()

    keyword = pyp.Word(pyp.alphanums + "-")

    # value types
    value = pyp.Word(pyp.alphanums + "_-/.:")
    quoted = dbl_quote + pyp.CharsNotIn('"') + dbl_quote
    ipv4_addr = (pyp.Optional(pyp.Literal("ipv4")).suppress() +
                 pyp.Word(pyp.nums + ".") + colon + number)
    # IPv6 addresses may optionally be enclosed in brackets
    ipv6_addr = (pyp.Optional(pyp.Literal("ipv6")).suppress() +
                 pyp.Optional(lbracket) + pyp.Word(pyp.hexnums + ":") +
                 pyp.Optional(rbracket) + colon + number)
    # meta device, extended syntax
    meta_value = ((value ^ quoted) + lbracket + number + rbracket)
    # device name, extended syntax
    device_value = pyp.Literal("minor").suppress() + number

    # a statement
    stmt = (~rbrace + keyword + ~lbrace +
            pyp.Optional(ipv4_addr ^ ipv6_addr ^ value ^ quoted ^ meta_value ^
                         device_value) +
            pyp.Optional(defa) + semi +
            pyp.Optional(pyp.restOfLine).suppress())

    # an entire section
    section_name = pyp.Word(pyp.alphas + "_")
    section = section_name + lbrace + pyp.ZeroOrMore(pyp.Group(stmt)) + rbrace

    # the top-level grammar: any mix of sections and bare statements
    bnf = pyp.ZeroOrMore(pyp.Group(section ^ stmt))
    bnf.ignore(comment)

    # cache the grammar for subsequent calls
    cls._PARSE_SHOW = bnf

    return bnf
  @classmethod
  def _GetShowData(cls, minor):
    """Fetch the raw `drbdsetup show` output for a minor.

    @return: the command's standard output, or None if it failed

    """
    show_cmd = utils.RunCmd(["drbdsetup", cls._DevPath(minor), "show"])
    if not show_cmd.failed:
      return show_cmd.stdout
    logging.error("Can't display the drbd config: %s - %s",
                  show_cmd.fail_reason, show_cmd.output)
    return None
  @classmethod
  def _GetDevInfo(cls, out):
    """Parse details about a given DRBD minor.

    This return, if available, the local backing device (as a path)
    and the local and remote (ip, port) information from a string
    containing the output of the `drbdsetup show` command as returned
    by _GetShowData.

    @return: a dict with (any subset of) the keys "local_dev",
        "meta_dev", "meta_index", "local_addr", "remote_addr"

    """
    data = {}
    if not out:
      return data

    bnf = cls._GetShowParser()
    # run pyparse

    try:
      results = bnf.parseString(out)
    except pyp.ParseException, err:
      _ThrowError("Can't parse drbdsetup show output: %s", str(err))

    # and massage the results into our desired format
    for section in results:
      sname = section[0]
      if sname == "_this_host":
        # our side: backing disk, meta device/index and local address
        for lst in section[1:]:
          if lst[0] == "disk":
            data["local_dev"] = lst[1]
          elif lst[0] == "meta-disk":
            data["meta_dev"] = lst[1]
            data["meta_index"] = lst[2]
          elif lst[0] == "address":
            data["local_addr"] = tuple(lst[1:])
      elif sname == "_remote_host":
        # peer side: only the address is of interest
        for lst in section[1:]:
          if lst[0] == "address":
            data["remote_addr"] = tuple(lst[1:])
    return data
  def _MatchesLocal(self, info):
    """Check whether our disk config matches an existing device.

    C{info} must be in the format returned by L{_GetDevInfo}. The check
    succeeds when the backing and meta devices recorded there are the
    same ones this instance was configured with, or when both sides
    agree that there is no local disk at all.

    """
    if self._children:
      backend, meta = self._children
    else:
      backend = meta = None

    if backend is None:
      matches = "local_dev" not in info
    else:
      matches = ("local_dev" in info and
                 info["local_dev"] == backend.dev_path)

    if meta is None:
      return (matches and
              "meta_dev" not in info and
              "meta_index" not in info)
    return (matches and
            "meta_dev" in info and
            info["meta_dev"] == meta.dev_path and
            "meta_index" in info and
            info["meta_index"] == 0)
  def _MatchesNet(self, info):
    """Check whether our network config matches an existing device.

    C{info} must be in the format returned by L{_GetDevInfo}. The check
    succeeds either when both ends are entirely unconfigured, or when
    the local and remote (address, port) pairs both match our settings.

    """
    unconfigured_local = self._lhost is None and "local_addr" not in info
    unconfigured_remote = self._rhost is None and "remote_addr" not in info
    if unconfigured_local and unconfigured_remote:
      return True

    if self._lhost is None:
      return False

    if "local_addr" not in info or "remote_addr" not in info:
      return False

    return (info["local_addr"] == (self._lhost, self._lport) and
            info["remote_addr"] == (self._rhost, self._rport))
  def _AssembleLocal(self, minor, backend, meta, size):
    """Configure the local (disk) part of a DRBD device.

    """
    cmd = ["drbdsetup", self._DevPath(minor), "disk",
           backend, meta, "0",
           "-e", "detach",
           "--create-device"]
    if size:
      cmd.extend(["-d", "%sm" % size])

    # which barrier-related options can be used depends on the running
    # DRBD version
    version = self._GetVersion(self._GetProcData())
    cmd.extend(self._ComputeDiskBarrierArgs(
      version["k_major"], version["k_minor"], version["k_point"],
      self.params[constants.LDP_BARRIERS],
      self.params[constants.LDP_NO_META_FLUSH]))

    if self.params[constants.LDP_DISK_CUSTOM]:
      cmd.extend(shlex.split(self.params[constants.LDP_DISK_CUSTOM]))

    result = utils.RunCmd(cmd)
    if result.failed:
      _ThrowError("drbd%d: can't attach local disk: %s", minor, result.output)
  @classmethod
  def _ComputeDiskBarrierArgs(cls, vmaj, vmin, vrel, disabled_barriers,
                              disable_meta_flush):
    """Compute the DRBD command line parameters for disk barriers

    Returns a list of the disk barrier parameters as requested via the
    disabled_barriers and disable_meta_flush arguments, and according to the
    supported ones in the DRBD version vmaj.vmin.vrel

    If the desired option is unsupported, raises errors.BlockDeviceError.

    """
    disabled_barriers_set = frozenset(disabled_barriers)
    if disabled_barriers_set not in constants.DRBD_VALID_BARRIER_OPT:
      raise errors.BlockDeviceError("%s is not a valid option set for DRBD"
                                    " barriers" % disabled_barriers)

    args = []

    # The following code assumes DRBD 8.x, with x < 4 and x != 1 (DRBD 8.1.x
    # does not exist). Note the parenthesized conjunction: the previous
    # form "not vmaj == 8 and vmin in (0, 2, 3)" only rejected versions
    # with a different major AND a minor of 0/2/3, silently accepting
    # e.g. 8.1.x, 8.4.x or 9.x.
    if not (vmaj == 8 and vmin in (0, 2, 3)):
      raise errors.BlockDeviceError("Unsupported DRBD version: %d.%d.%d" %
                                    (vmaj, vmin, vrel))

    def _AppendOrRaise(option, min_version):
      """Helper for DRBD options: append if supported, else raise."""
      if min_version is not None and vrel >= min_version:
        args.append(option)
      else:
        raise errors.BlockDeviceError("Could not use the option %s as the"
                                      " DRBD version %d.%d.%d does not support"
                                      " it." % (option, vmaj, vmin, vrel))

    # the minimum version for each feature is encoded via pairs of (minor
    # version -> x) where x is version in which support for the option was
    # introduced.
    meta_flush_supported = disk_flush_supported = {
      0: 12,
      2: 7,
      3: 0,
      }

    disk_drain_supported = {
      2: 7,
      3: 0,
      }

    disk_barriers_supported = {
      3: 0,
      }

    # meta flushes
    if disable_meta_flush:
      _AppendOrRaise(cls._DISABLE_META_FLUSH_OPTION,
                     meta_flush_supported.get(vmin, None))

    # disk flushes
    if constants.DRBD_B_DISK_FLUSH in disabled_barriers_set:
      _AppendOrRaise(cls._DISABLE_FLUSH_OPTION,
                     disk_flush_supported.get(vmin, None))

    # disk drain
    if constants.DRBD_B_DISK_DRAIN in disabled_barriers_set:
      _AppendOrRaise(cls._DISABLE_DRAIN_OPTION,
                     disk_drain_supported.get(vmin, None))

    # disk barriers
    if constants.DRBD_B_DISK_BARRIERS in disabled_barriers_set:
      _AppendOrRaise(cls._DISABLE_DISK_OPTION,
                     disk_barriers_supported.get(vmin, None))

    return args
  def _AssembleNet(self, minor, net_info, protocol,
                   dual_pri=False, hmac=None, secret=None):
    """Configure the network part of the device.

    @param net_info: (local_host, local_port, remote_host, remote_port);
        if any element is None, the network side is shut down instead
    @param protocol: the DRBD replication protocol to use
    @param dual_pri: whether to allow both nodes to be primary (adds -m)
    @param hmac: authentication algorithm; used together with C{secret}
    @param secret: shared secret for peer authentication

    """
    lhost, lport, rhost, rport = net_info
    if None in net_info:
      # we don't want network connection and actually want to make
      # sure its shutdown
      self._ShutdownNet(minor)
      return

    # Workaround for a race condition. When DRBD is doing its dance to
    # establish a connection with its peer, it also sends the
    # synchronization speed over the wire. In some cases setting the
    # sync speed only after setting up both sides can race with DRBD
    # connecting, hence we set it here before telling DRBD anything
    # about its peer.
    sync_errors = self._SetMinorSyncParams(minor, self.params)
    if sync_errors:
      _ThrowError("drbd%d: can't set the synchronization parameters: %s" %
                  (minor, utils.CommaJoin(sync_errors)))

    # determine the address family; both endpoints must use the same one
    if netutils.IP6Address.IsValid(lhost):
      if not netutils.IP6Address.IsValid(rhost):
        _ThrowError("drbd%d: can't connect ip %s to ip %s" %
                    (minor, lhost, rhost))
      family = "ipv6"
    elif netutils.IP4Address.IsValid(lhost):
      if not netutils.IP4Address.IsValid(rhost):
        _ThrowError("drbd%d: can't connect ip %s to ip %s" %
                    (minor, lhost, rhost))
      family = "ipv4"
    else:
      _ThrowError("drbd%d: Invalid ip %s" % (minor, lhost))

    args = ["drbdsetup", self._DevPath(minor), "net",
            "%s:%s:%s" % (family, lhost, lport),
            "%s:%s:%s" % (family, rhost, rport), protocol,
            "-A", "discard-zero-changes",
            "-B", "consensus",
            "--create-device",
            ]
    if dual_pri:
      args.append("-m")
    if hmac and secret:
      args.extend(["-a", hmac, "-x", secret])

    if self.params[constants.LDP_NET_CUSTOM]:
      args.extend(shlex.split(self.params[constants.LDP_NET_CUSTOM]))

    result = utils.RunCmd(args)
    if result.failed:
      _ThrowError("drbd%d: can't setup network: %s - %s",
                  minor, result.fail_reason, result.output)

    def _CheckNetworkConfig():
      # retry helper: raise RetryAgain until `drbdsetup show` reports
      # both addresses with the expected values
      info = self._GetDevInfo(self._GetShowData(minor))
      if not "local_addr" in info or not "remote_addr" in info:
        raise utils.RetryAgain()

      if (info["local_addr"] != (lhost, lport) or
          info["remote_addr"] != (rhost, rport)):
        raise utils.RetryAgain()

    try:
      utils.Retry(_CheckNetworkConfig, 1.0, 10.0)
    except utils.RetryTimeout:
      _ThrowError("drbd%d: timeout while configuring network", minor)
  def AddChildren(self, devices):
    """Add a disk to the DRBD device.

    Attaches local storage (backing device plus metadata device) to an
    assembled but diskless DRBD minor.

    @type devices: list
    @param devices: exactly two block devices, in the order
        [backend, meta]
    @raise errors.BlockDeviceError: if the device is not attached, is
        already backed by a local disk, or the children are not ready

    """
    if self.minor is None:
      # note: error message previously said "dbrd8"; fixed to "drbd8"
      _ThrowError("drbd%d: can't attach to drbd8 during AddChildren",
                  self._aminor)
    if len(devices) != 2:
      _ThrowError("drbd%d: need two devices for AddChildren", self.minor)
    info = self._GetDevInfo(self._GetShowData(self.minor))
    if "local_dev" in info:
      _ThrowError("drbd%d: already attached to a local disk", self.minor)
    backend, meta = devices
    if backend.dev_path is None or meta.dev_path is None:
      _ThrowError("drbd%d: children not ready during AddChildren", self.minor)
    backend.Open()
    meta.Open()
    # validate and (re)initialize the metadata device before attaching it
    self._CheckMetaSize(meta.dev_path)
    self._InitMeta(self._FindUnusedMinor(), meta.dev_path)

    self._AssembleLocal(self.minor, backend.dev_path, meta.dev_path, self.size)
    self._children = devices
  def RemoveChildren(self, devices):
    """Detach the drbd device from local storage.

    The given devices must match the children we currently track; after
    verification the local (disk) side of the DRBD minor is shut down
    and the children list is cleared.

    @type devices: list
    @param devices: the two devices expected to back this DRBD minor

    """
    if self.minor is None:
      _ThrowError("drbd%d: can't attach to drbd8 during RemoveChildren",
                  self._aminor)
    # nothing to do if there is no backing storage attached at all
    show_info = self._GetDevInfo(self._GetShowData(self.minor))
    if "local_dev" not in show_info:
      return
    if len(self._children) != 2:
      _ThrowError("drbd%d: we don't have two children: %s", self.minor,
                  self._children)
    if self._children.count(None) == 2: # we don't actually have children :)
      logging.warning("drbd%d: requested detach while detached", self.minor)
      return
    if len(devices) != 2:
      _ThrowError("drbd%d: we need two children in RemoveChildren", self.minor)
    # every requested device must match the tracked child at that position
    for own_child, requested in zip(self._children, devices):
      if requested != own_child.dev_path:
        _ThrowError("drbd%d: mismatch in local storage (%s != %s) in"
                    " RemoveChildren", self.minor, requested,
                    own_child.dev_path)

    self._ShutdownLocal(self.minor)
    self._children = []
  @classmethod
  def _SetMinorSyncParams(cls, minor, params):
    """Set the parameters of the DRBD syncer.

    This is the low-level implementation.

    @type minor: int
    @param minor: the drbd minor whose settings we change
    @type params: dict
    @param params: LD level disk parameters related to the synchronization
    @rtype: list
    @return: a list of error messages

    """

    args = ["drbdsetup", cls._DevPath(minor), "syncer"]
    if params[constants.LDP_DYNAMIC_RESYNC]:
      # the dynamic resync speed controller needs DRBD >= 8.3.9; check
      # the kernel module version before emitting the c-* options
      version = cls._GetVersion(cls._GetProcData())
      vmin = version["k_minor"]
      vrel = version["k_point"]

      # By definition we are using 8.x, so just check the rest of the version
      # number
      if vmin != 3 or vrel < 9:
        msg = ("The current DRBD version (8.%d.%d) does not support the "
               "dynamic resync speed controller" % (vmin, vrel))
        logging.error(msg)
        return [msg]

      if params[constants.LDP_PLAN_AHEAD] == 0:
        msg = ("A value of 0 for c-plan-ahead disables the dynamic sync speed"
               " controller at DRBD level. If you want to disable it, please"
               " set the dynamic-resync disk parameter to False.")
        logging.error(msg)
        return [msg]

      # add the c-* parameters to args
      args.extend(["--c-plan-ahead", params[constants.LDP_PLAN_AHEAD],
                   "--c-fill-target", params[constants.LDP_FILL_TARGET],
                   "--c-delay-target", params[constants.LDP_DELAY_TARGET],
                   "--c-max-rate", params[constants.LDP_MAX_RATE],
                   "--c-min-rate", params[constants.LDP_MIN_RATE],
                   ])

    else:
      # static resync rate
      args.extend(["-r", "%d" % params[constants.LDP_RESYNC_RATE]])

    args.append("--create-device")
    result = utils.RunCmd(args)
    if result.failed:
      msg = ("Can't change syncer rate: %s - %s" %
             (result.fail_reason, result.output))
      logging.error(msg)
      return [msg]

    return []
  def SetSyncParams(self, params):
    """Set the synchronization parameters of the DRBD syncer.

    @type params: dict
    @param params: LD level disk parameters related to the synchronization
    @rtype: list
    @return: a list of error messages, emitted both by the current node and by
    children. An empty list means no errors

    """
    if self.minor is None:
      message = "Not attached during SetSyncParams"
      logging.info(message)
      return [message]

    # collect errors from the children first, then from our own minor
    messages = super(DRBD8, self).SetSyncParams(params)
    messages.extend(self._SetMinorSyncParams(self.minor, params))
    return messages
  def PauseResumeSync(self, pause):
    """Pauses or resumes the sync of a DRBD device.

    @param pause: Whether to pause or resume
    @return: the success of the operation

    """
    if self.minor is None:
      logging.info("Not attached during PauseSync")
      return False

    children_result = super(DRBD8, self).PauseResumeSync(pause)

    cmd = "pause-sync" if pause else "resume-sync"

    result = utils.RunCmd(["drbdsetup", self.dev_path, cmd])
    if result.failed:
      logging.error("Can't %s: %s - %s", cmd,
                    result.fail_reason, result.output)
    # success only if both our command and all children succeeded
    return not result.failed and children_result
  def GetProcStatus(self):
    """Return this device's status as parsed from /proc.

    @rtype: DRBD8Status
    @raise errors.BlockDeviceError: if not attached or the minor is
        missing from /proc

    """
    if self.minor is None:
      _ThrowError("drbd%d: GetStats() called while not attached", self._aminor)
    minors = self._MassageProcData(self._GetProcData())
    if self.minor not in minors:
      _ThrowError("drbd%d: can't find myself in /proc", self.minor)
    return DRBD8Status(minors[self.minor])
  def GetSyncStatus(self):
    """Returns the sync status of the device.

    If sync_percent is None, it means all is ok
    If estimated_time is None, it means we can't estimate
    the time needed, otherwise it's the time left in seconds.

    We set the is_degraded parameter to True on two conditions:
    network not connected or local disk missing.

    We compute the ldisk parameter based on whether we have a local
    disk or not.

    @rtype: objects.BlockDevStatus

    """
    if self.minor is None and not self.Attach():
      _ThrowError("drbd%d: can't Attach() in GetSyncStatus", self._aminor)

    proc_status = self.GetProcStatus()
    # degraded when either the peer link or the local disk is not healthy
    degraded = not (proc_status.is_connected and
                    proc_status.is_disk_uptodate)

    if proc_status.is_disk_uptodate:
      disk_state = constants.LDS_OKAY
    elif proc_status.is_diskless:
      disk_state = constants.LDS_FAULTY
    else:
      disk_state = constants.LDS_UNKNOWN

    return objects.BlockDevStatus(dev_path=self.dev_path,
                                  major=self.major,
                                  minor=self.minor,
                                  sync_percent=proc_status.sync_percent,
                                  estimated_time=proc_status.est_time,
                                  is_degraded=degraded,
                                  ldisk_status=disk_state)
  def Open(self, force=False):
    """Make the local state primary.

    If the 'force' parameter is given, the '-o' option is passed to
    drbdsetup. Since this is a potentially dangerous operation, the
    force flag should be only given after creation, when it actually
    is mandatory.

    @type force: boolean
    @param force: whether to force the promotion with '-o'

    """
    if self.minor is None and not self.Attach():
      logging.error("DRBD cannot attach to a device during open")
      return False
    promote = ["drbdsetup", self.dev_path, "primary"]
    if force:
      promote.append("-o")
    result = utils.RunCmd(promote)
    if result.failed:
      _ThrowError("drbd%d: can't make drbd device primary: %s", self.minor,
                  result.output)
  def Close(self):
    """Make the local state secondary.

    This will, of course, fail if the device is in use.

    """
    if self.minor is None and not self.Attach():
      _ThrowError("drbd%d: can't Attach() in Close()", self._aminor)
    demote = ["drbdsetup", self.dev_path, "secondary"]
    result = utils.RunCmd(demote)
    if result.failed:
      _ThrowError("drbd%d: can't switch drbd device to secondary: %s",
                  self.minor, result.output)
  def DisconnectNet(self):
    """Removes network configuration.

    This method shutdowns the network side of the device.

    The method will wait up to a hardcoded timeout for the device to
    go into standalone after the 'disconnect' command before
    re-configuring it, as sometimes it takes a while for the
    disconnect to actually propagate and thus we might issue a 'net'
    command while the device is still connected. If the device will
    still be attached to the network and we time out, we raise an
    exception.

    @raise errors.BlockDeviceError: if the device is not attached, has
        no network configuration, or does not reach standalone within
        the timeout

    """
    if self.minor is None:
      _ThrowError("drbd%d: disk not attached in re-attach net", self._aminor)

    if None in (self._lhost, self._lport, self._rhost, self._rport):
      _ThrowError("drbd%d: DRBD disk missing network info in"
                  " DisconnectNet()", self.minor)

    # mutable holder so the nested retry function can record whether any
    # disconnect attempt ever succeeded
    class _DisconnectStatus:
      def __init__(self, ever_disconnected):
        self.ever_disconnected = ever_disconnected

    dstatus = _DisconnectStatus(_IgnoreError(self._ShutdownNet, self.minor))

    def _WaitForDisconnect():
      if self.GetProcStatus().is_standalone:
        return

      # retry the disconnect, it seems possible that due to a well-time
      # disconnect on the peer, my disconnect command might be ignored and
      # forgotten
      dstatus.ever_disconnected = \
        _IgnoreError(self._ShutdownNet, self.minor) or dstatus.ever_disconnected

      raise utils.RetryAgain()

    # Keep start time
    start_time = time.time()

    try:
      # Start delay at 100 milliseconds and grow up to 2 seconds
      utils.Retry(_WaitForDisconnect, (0.1, 1.5, 2.0),
                  self._NET_RECONFIG_TIMEOUT)
    except utils.RetryTimeout:
      # choose the message based on whether a disconnect ever succeeded
      if dstatus.ever_disconnected:
        msg = ("drbd%d: device did not react to the"
               " 'disconnect' command in a timely manner")
      else:
        msg = "drbd%d: can't shutdown network, even after multiple retries"

      _ThrowError(msg, self.minor)

    # log a notice if the disconnect took a significant fraction of the
    # allowed timeout
    reconfig_time = time.time() - start_time
    if reconfig_time > (self._NET_RECONFIG_TIMEOUT * 0.25):
      logging.info("drbd%d: DisconnectNet: detach took %.3f seconds",
                   self.minor, reconfig_time)
  def AttachNet(self, multimaster):
    """Reconnects the network.

    This method connects the network side of the device with a
    specified multi-master flag. The device needs to be 'Standalone'
    but have valid network configuration data.

    @type multimaster: boolean
    @param multimaster: init the network in dual-primary mode

    """
    if self.minor is None:
      _ThrowError("drbd%d: device not attached in AttachNet", self._aminor)

    net_data = (self._lhost, self._lport, self._rhost, self._rport)
    if None in net_data:
      _ThrowError("drbd%d: missing network info in AttachNet()", self.minor)

    # the device must be fully disconnected before reconfiguring the net
    if not self.GetProcStatus().is_standalone:
      _ThrowError("drbd%d: device is not standalone in AttachNet", self.minor)

    self._AssembleNet(self.minor, net_data, constants.DRBD_NET_PROTOCOL,
                      dual_pri=multimaster, hmac=constants.DRBD_HMAC_ALG,
                      secret=self._secret)
  def Attach(self):
    """Check if our minor is configured.

    This doesn't do any device configurations - it only checks if the
    minor is in a state different from Unconfigured.

    Note that this function will not change the state of the system in
    any way (except in case of side-effects caused by reading from
    /proc).

    @rtype: boolean
    @return: True if the minor is configured, False otherwise

    """
    found = self._aminor if self._aminor in self.GetUsedDevs() else None
    self._SetFromMinor(found)
    return found is not None
  def Assemble(self):
    """Assemble the drbd.

    Method:
      - if we have a configured device, we try to ensure that it matches
        our config
      - if not, we create it from zero
      - anyway, set the device parameters

    """
    super(DRBD8, self).Assemble()

    self.Attach()
    if self.minor is not None:
      # partially configured: recheck local/network state and repair
      self._SlowAssemble()
    else:
      # completely unconfigured: build everything from scratch
      self._FastAssemble()

    setup_errors = self.SetSyncParams(self.params)
    if setup_errors:
      _ThrowError("drbd%d: can't set the synchronization parameters: %s" %
                  (self.minor, utils.CommaJoin(setup_errors)))
  def _SlowAssemble(self):
    """Assembles the DRBD device from a (partially) configured device.

    In case of partially attached (local device matches but no network
    setup), we perform the network attach. If successful, we re-test
    the attach if can return success.

    The single-iteration for loop exists only so that 'break' can mark
    success and the for/else clause can mark failure (minor = None).

    """
    # TODO: Rewrite to not use a for loop just because there is 'break'
    # pylint: disable=W0631
    net_data = (self._lhost, self._lport, self._rhost, self._rport)
    for minor in (self._aminor,):
      info = self._GetDevInfo(self._GetShowData(minor))
      match_l = self._MatchesLocal(info)
      match_r = self._MatchesNet(info)

      if match_l and match_r:
        # everything matches
        break

      if match_l and not match_r and "local_addr" not in info:
        # disk matches, but not attached to network, attach and recheck
        self._AssembleNet(minor, net_data, constants.DRBD_NET_PROTOCOL,
                          hmac=constants.DRBD_HMAC_ALG, secret=self._secret)
        if self._MatchesNet(self._GetDevInfo(self._GetShowData(minor))):
          break
        else:
          _ThrowError("drbd%d: network attach successful, but 'drbdsetup"
                      " show' disagrees", minor)

      if match_r and "local_dev" not in info:
        # no local disk, but network attached and it matches
        self._AssembleLocal(minor, self._children[0].dev_path,
                            self._children[1].dev_path, self.size)
        if self._MatchesNet(self._GetDevInfo(self._GetShowData(minor))):
          break
        else:
          _ThrowError("drbd%d: disk attach successful, but 'drbdsetup"
                      " show' disagrees", minor)

      # this case must be considered only if we actually have local
      # storage, i.e. not in diskless mode, because all diskless
      # devices are equal from the point of view of local
      # configuration
      if (match_l and "local_dev" in info and
          not match_r and "local_addr" in info):
        # strange case - the device network part points to somewhere
        # else, even though its local storage is ours; as we own the
        # drbd space, we try to disconnect from the remote peer and
        # reconnect to our correct one
        try:
          self._ShutdownNet(minor)
        except errors.BlockDeviceError, err:
          _ThrowError("drbd%d: device has correct local storage, wrong"
                      " remote peer and is unable to disconnect in order"
                      " to attach to the correct peer: %s", minor, str(err))
        # note: _AssembleNet also handles the case when we don't want
        # local storage (i.e. one or more of the _[lr](host|port) is
        # None)
        self._AssembleNet(minor, net_data, constants.DRBD_NET_PROTOCOL,
                          hmac=constants.DRBD_HMAC_ALG, secret=self._secret)
        if self._MatchesNet(self._GetDevInfo(self._GetShowData(minor))):
          break
        else:
          _ThrowError("drbd%d: network attach successful, but 'drbdsetup"
                      " show' disagrees", minor)

    else:
      # no break was hit above: nothing could be repaired
      minor = None

    self._SetFromMinor(minor)
    if minor is None:
      _ThrowError("drbd%d: cannot activate, unknown or unhandled reason",
                  self._aminor)
  def _FastAssemble(self):
    """Assemble the drbd device from zero.

    This is run when in Assemble we detect our minor is unused.

    """
    minor = self._aminor
    kids = self._children
    # attach local storage only if both children are present and usable
    if kids and kids[0] and kids[1]:
      self._AssembleLocal(minor, kids[0].dev_path, kids[1].dev_path,
                          self.size)
    # configure the network side only with complete endpoint information
    net_data = (self._lhost, self._lport, self._rhost, self._rport)
    if all(net_data):
      self._AssembleNet(minor, net_data, constants.DRBD_NET_PROTOCOL,
                        hmac=constants.DRBD_HMAC_ALG, secret=self._secret)
    self._SetFromMinor(minor)
  @classmethod
  def _ShutdownLocal(cls, minor):
    """Detach from the local device.

    I/Os will continue to be served from the remote device. If we
    don't have a remote device, this operation will fail.

    @type minor: int
    @param minor: the drbd minor to detach

    """
    detach_cmd = ["drbdsetup", cls._DevPath(minor), "detach"]
    result = utils.RunCmd(detach_cmd)
    if result.failed:
      _ThrowError("drbd%d: can't detach local disk: %s", minor, result.output)
  @classmethod
  def _ShutdownNet(cls, minor):
    """Disconnect from the remote peer.

    This fails if we don't have a local device.

    @type minor: int
    @param minor: the drbd minor to disconnect

    """
    disconnect_cmd = ["drbdsetup", cls._DevPath(minor), "disconnect"]
    result = utils.RunCmd(disconnect_cmd)
    if result.failed:
      _ThrowError("drbd%d: can't shutdown network: %s", minor, result.output)
  @classmethod
  def _ShutdownAll(cls, minor):
    """Deactivate the device.

    This will, of course, fail if the device is in use.

    @type minor: int
    @param minor: the drbd minor to bring down

    """
    down_cmd = ["drbdsetup", cls._DevPath(minor), "down"]
    result = utils.RunCmd(down_cmd)
    if result.failed:
      _ThrowError("drbd%d: can't shutdown drbd device: %s",
                  minor, result.output)
  def Shutdown(self):
    """Shutdown the DRBD device.

    The tracked minor and device path are cleared before the actual
    'down' command runs, so this object is marked detached first.

    """
    if self.minor is None and not self.Attach():
      logging.info("drbd%d: not attached during Shutdown()", self._aminor)
      return
    old_minor = self.minor
    self.minor = None
    self.dev_path = None
    self._ShutdownAll(old_minor)
  def Remove(self):
    """Stub remove for DRBD devices.

    DRBD devices are only assembled, never created as such, so removal
    amounts to shutting the device down.

    """
    self.Shutdown()
  @classmethod
  def Create(cls, unique_id, children, size, params, excl_stor):
    """Create a new DRBD8 device.

    Since DRBD devices are not created per se, just assembled, this
    function only initializes the metadata.

    @param excl_stor: rejected for DRBD devices
    @raise errors.ProgrammerError: on invalid setup or exclusive storage

    """
    if len(children) != 2:
      raise errors.ProgrammerError("Invalid setup for the drbd device")
    if excl_stor:
      raise errors.ProgrammerError("DRBD device requested with"
                                   " exclusive_storage")

    # refuse to touch a minor that /proc reports as currently in use
    aminor = unique_id[4]
    proc_info = cls._MassageProcData(cls._GetProcData())
    in_use = (aminor in proc_info and
              DRBD8Status(proc_info[aminor]).is_in_use)
    if in_use:
      _ThrowError("drbd%d: minor is already in use at Create() time", aminor)

    meta = children[1]
    meta.Assemble()
    if not meta.Attach():
      _ThrowError("drbd%d: can't attach to meta device '%s'",
                  aminor, meta)
    cls._CheckMetaSize(meta.dev_path)
    cls._InitMeta(aminor, meta.dev_path)
    return cls(unique_id, children, size, params)
  def Grow(self, amount, dryrun, backingstore):
    """Resize the DRBD device and its backing storage.

    @type amount: int
    @param amount: the amount (in MiB) to grow with
    @type dryrun: boolean
    @param dryrun: only validate the growth, don't resize DRBD itself
    @type backingstore: boolean
    @param backingstore: whether to only grow the backing storage

    """
    if self.minor is None:
      _ThrowError("drbd%d: Grow called while not attached", self._aminor)
    if len(self._children) != 2 or None in self._children:
      _ThrowError("drbd%d: cannot grow diskless device", self.minor)

    # grow the data device first; DRBD resizes on top of it
    self._children[0].Grow(amount, dryrun, backingstore)
    if dryrun or backingstore:
      # DRBD does not support dry-run mode and is not backing storage,
      # so we'll return here
      return

    new_size_mb = self.size + amount
    result = utils.RunCmd(["drbdsetup", self.dev_path, "resize", "-s",
                           "%dm" % new_size_mb])
    if result.failed:
      _ThrowError("drbd%d: resize failed: %s", self.minor, result.output)
class FileStorage(BlockDev):
2297
  """File device.
2298

2299
  This class represents the a file storage backend device.
2300

2301
  The unique_id for the file device is a (file_driver, file_path) tuple.
2302

2303
  """
2304
  def __init__(self, unique_id, children, size, params):
2305
    """Initalizes a file device backend.
2306

2307
    """
2308
    if children:
2309
      raise errors.BlockDeviceError("Invalid setup for file device")
2310
    super(FileStorage, self).__init__(unique_id, children, size, params)
2311
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
2312
      raise ValueError("Invalid configuration data %s" % str(unique_id))
2313
    self.driver = unique_id[0]
2314
    self.dev_path = unique_id[1]
2315

    
2316
    CheckFileStoragePath(self.dev_path)
2317

    
2318
    self.Attach()
2319

    
2320
  def Assemble(self):
2321
    """Assemble the device.
2322

2323
    Checks whether the file device exists, raises BlockDeviceError otherwise.
2324

2325
    """
2326
    if not os.path.exists(self.dev_path):
2327
      _ThrowError("File device '%s' does not exist" % self.dev_path)
2328

    
2329
  def Shutdown(self):
2330
    """Shutdown the device.
2331

2332
    This is a no-op for the file type, as we don't deactivate
2333
    the file on shutdown.
2334

2335
    """
2336
    pass
2337

    
2338
  def Open(self, force=False):
2339
    """Make the device ready for I/O.
2340

2341
    This is a no-op for the file type.
2342

2343
    """
2344
    pass
2345

    
2346
  def Close(self):
2347
    """Notifies that the device will no longer be used for I/O.
2348

2349
    This is a no-op for the file type.
2350

2351
    """
2352
    pass
2353

    
2354
  def Remove(self):
2355
    """Remove the file backing the block device.
2356

2357
    @rtype: boolean
2358
    @return: True if the removal was successful
2359

2360
    """
2361
    try:
2362
      os.remove(self.dev_path)
2363
    except OSError, err:
2364
      if err.errno != errno.ENOENT:
2365
        _ThrowError("Can't remove file '%s': %s", self.dev_path, err)
2366

    
2367
  def Rename(self, new_id):
2368
    """Renames the file.
2369

2370
    """
2371
    # TODO: implement rename for file-based storage
2372
    _ThrowError("Rename is not supported for file-based storage")
2373

    
2374
  def Grow(self, amount, dryrun, backingstore):
2375
    """Grow the file
2376

2377
    @param amount: the amount (in mebibytes) to grow with
2378

2379
    """
2380
    if not backingstore:
2381
      return
2382
    # Check that the file exists
2383
    self.Assemble()
2384
    current_size = self.GetActualSize()
2385
    new_size = current_size + amount * 1024 * 1024
2386
    assert new_size > current_size, "Cannot Grow with a negative amount"
2387
    # We can't really simulate the growth
2388
    if dryrun:
2389
      return
2390
    try:
2391
      f = open(self.dev_path, "a+")
2392
      f.truncate(new_size)
2393
      f.close()
2394
    except EnvironmentError, err:
2395
      _ThrowError("Error in file growth: %", str(err))
2396

    
2397
  def Attach(self):
2398
    """Attach to an existing file.
2399

2400
    Check if this file already exists.
2401

2402
    @rtype: boolean
2403
    @return: True if file exists
2404

2405
    """
2406
    self.attached = os.path.exists(self.dev_path)
2407
    return self.attached
2408

    
2409
  def GetActualSize(self):
2410
    """Return the actual disk size.
2411

2412
    @note: the device needs to be active when this is called
2413

2414
    """
2415
    assert self.attached, "BlockDevice not attached in GetActualSize()"
2416
    try:
2417
      st = os.stat(self.dev_path)
2418
      return st.st_size
2419
    except OSError, err:
2420
      _ThrowError("Can't stat %s: %s", self.dev_path, err)
2421

    
2422
  @classmethod
2423
  def Create(cls, unique_id, children, size, params, excl_stor):
2424
    """Create a new file.
2425

2426
    @param size: the size of file in MiB
2427

2428
    @rtype: L{bdev.FileStorage}
2429
    @return: an instance of FileStorage
2430

2431
    """
2432
    if excl_stor:
2433
      raise errors.ProgrammerError("FileStorage device requested with"
2434
                                   " exclusive_storage")
2435
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
2436
      raise ValueError("Invalid configuration data %s" % str(unique_id))
2437

    
2438
    dev_path = unique_id[1]
2439

    
2440
    CheckFileStoragePath(dev_path)
2441

    
2442
    try:
2443
      fd = os.open(dev_path, os.O_RDWR | os.O_CREAT | os.O_EXCL)
2444
      f = os.fdopen(fd, "w")
2445
      f.truncate(size * 1024 * 1024)
2446
      f.close()
2447
    except EnvironmentError, err:
2448
      if err.errno == errno.EEXIST:
2449
        _ThrowError("File already existing: %s", dev_path)
2450
      _ThrowError("Error in file creation: %", str(err))
2451

    
2452
    return FileStorage(unique_id, children, size, params)
2453

    
2454

    
2455
class PersistentBlockDevice(BlockDev):
2456
  """A block device with persistent node
2457

2458
  May be either directly attached, or exposed through DM (e.g. dm-multipath).
2459
  udev helpers are probably required to give persistent, human-friendly
2460
  names.
2461

2462
  For the time being, pathnames are required to lie under /dev.
2463

2464
  """
2465
  def __init__(self, unique_id, children, size, params):
2466
    """Attaches to a static block device.
2467

2468
    The unique_id is a path under /dev.
2469

2470
    """
2471
    super(PersistentBlockDevice, self).__init__(unique_id, children, size,
2472
                                                params)
2473
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
2474
      raise ValueError("Invalid configuration data %s" % str(unique_id))
2475
    self.dev_path = unique_id[1]
2476
    if not os.path.realpath(self.dev_path).startswith("/dev/"):
2477
      raise ValueError("Full path '%s' lies outside /dev" %
2478
                              os.path.realpath(self.dev_path))
2479
    # TODO: this is just a safety guard checking that we only deal with devices
2480
    # we know how to handle. In the future this will be integrated with
2481
    # external storage backends and possible values will probably be collected
2482
    # from the cluster configuration.
2483
    if unique_id[0] != constants.BLOCKDEV_DRIVER_MANUAL:
2484
      raise ValueError("Got persistent block device of invalid type: %s" %
2485
                       unique_id[0])
2486

    
2487
    self.major = self.minor = None
2488
    self.Attach()
2489

    
2490
  @classmethod
2491
  def Create(cls, unique_id, children, size, params, excl_stor):
2492
    """Create a new device
2493

2494
    This is a noop, we only return a PersistentBlockDevice instance
2495

2496
    """
2497
    if excl_stor:
2498
      raise errors.ProgrammerError("Persistent block device requested with"
2499
                                   " exclusive_storage")
2500
    return PersistentBlockDevice(unique_id, children, 0, params)
2501

    
2502
  def Remove(self):
2503
    """Remove a device
2504

2505
    This is a noop
2506

2507
    """
2508
    pass
2509

    
2510
  def Rename(self, new_id):
2511
    """Rename this device.
2512

2513
    """
2514
    _ThrowError("Rename is not supported for PersistentBlockDev storage")
2515

    
2516
  def Attach(self):
2517
    """Attach to an existing block device.
2518

2519

2520
    """
2521
    self.attached = False
2522
    try:
2523
      st = os.stat(self.dev_path)
2524
    except OSError, err:
2525
      logging.error("Error stat()'ing %s: %s", self.dev_path, str(err))
2526
      return False
2527

    
2528
    if not stat.S_ISBLK(st.st_mode):
2529
      logging.error("%s is not a block device", self.dev_path)
2530
      return False
2531

    
2532
    self.major = os.major(st.st_rdev)
2533
    self.minor = os.minor(st.st_rdev)
2534
    self.attached = True
2535

    
2536
    return True
2537

    
2538
  def Assemble(self):
2539
    """Assemble the device.
2540

2541
    """
2542
    pass
2543

    
2544
  def Shutdown(self):
2545
    """Shutdown the device.
2546

2547
    """
2548
    pass
2549

    
2550
  def Open(self, force=False):
2551
    """Make the device ready for I/O.
2552

2553
    """
2554
    pass
2555

    
2556
  def Close(self):
2557
    """Notifies that the device will no longer be used for I/O.
2558

2559
    """
2560
    pass
2561

    
2562
  def Grow(self, amount, dryrun, backingstore):
2563
    """Grow the logical volume.
2564

2565
    """
2566
    _ThrowError("Grow is not supported for PersistentBlockDev storage")
2567

    
2568

    
2569
class RADOSBlockDevice(BlockDev):
  """A RADOS Block Device (rbd).

  This class implements the RADOS Block Device for the backend. You need
  the rbd kernel driver, the RADOS Tools and a working RADOS cluster for
  this to be functional.

  All operations shell out to the ``rbd`` command-line tool; the volume
  name and the driver come from the two-element unique_id.

  """
  def __init__(self, unique_id, children, size, params):
    """Attaches to an rbd device.

    @param unique_id: two-element tuple/list of (driver, rbd volume name)
    @raise ValueError: if unique_id is not a two-element tuple or list

    """
    super(RADOSBlockDevice, self).__init__(unique_id, children, size, params)
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
      raise ValueError("Invalid configuration data %s" % str(unique_id))

    self.driver, self.rbd_name = unique_id

    # major/minor are filled in by Attach() on success
    self.major = self.minor = None
    self.Attach()

  @classmethod
  def Create(cls, unique_id, children, size, params, excl_stor):
    """Create a new rbd device.

    Provision a new rbd volume inside a RADOS pool.

    @raise errors.ProgrammerError: on invalid unique_id or if
        exclusive_storage is requested (unsupported for rbd)

    """
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
      raise errors.ProgrammerError("Invalid configuration data %s" %
                                   str(unique_id))
    if excl_stor:
      raise errors.ProgrammerError("RBD device requested with"
                                   " exclusive_storage")
    rbd_pool = params[constants.LDP_POOL]
    rbd_name = unique_id[1]

    # Provision a new rbd volume (Image) inside the RADOS cluster.
    cmd = [constants.RBD_CMD, "create", "-p", rbd_pool,
           rbd_name, "--size", "%s" % size]
    result = utils.RunCmd(cmd)
    if result.failed:
      _ThrowError("rbd creation failed (%s): %s",
                  result.fail_reason, result.output)

    return RADOSBlockDevice(unique_id, children, size, params)

  def Remove(self):
    """Remove the rbd device.

    Unmaps the device first (via Shutdown), then deletes the Image from
    the RADOS cluster.

    """
    rbd_pool = self.params[constants.LDP_POOL]
    rbd_name = self.unique_id[1]

    if not self.minor and not self.Attach():
      # The rbd device doesn't exist.
      return

    # First shutdown the device (remove mappings).
    self.Shutdown()

    # Remove the actual Volume (Image) from the RADOS cluster.
    cmd = [constants.RBD_CMD, "rm", "-p", rbd_pool, rbd_name]
    result = utils.RunCmd(cmd)
    if result.failed:
      _ThrowError("Can't remove Volume from cluster with rbd rm: %s - %s",
                  result.fail_reason, result.output)

  def Rename(self, new_id):
    """Rename this device.

    Renaming is currently a noop for rbd devices.

    """
    pass

  def Attach(self):
    """Attach to an existing rbd device.

    This method maps the rbd volume that matches our name with
    an rbd device and then attaches to this device.

    @rtype: boolean
    @return: True on success, False if the mapped path is missing or is
        not a block device

    """
    self.attached = False

    # Map the rbd volume to a block device under /dev
    self.dev_path = self._MapVolumeToBlockdev(self.unique_id)

    try:
      st = os.stat(self.dev_path)
    except OSError, err:
      logging.error("Error stat()'ing %s: %s", self.dev_path, str(err))
      return False

    if not stat.S_ISBLK(st.st_mode):
      logging.error("%s is not a block device", self.dev_path)
      return False

    self.major = os.major(st.st_rdev)
    self.minor = os.minor(st.st_rdev)
    self.attached = True

    return True

  def _MapVolumeToBlockdev(self, unique_id):
    """Maps existing rbd volumes to block devices.

    This method should be idempotent if the mapping already exists.

    @rtype: string
    @return: the block device path that corresponds to the volume

    """
    pool = self.params[constants.LDP_POOL]
    name = unique_id[1]

    # Check if the mapping already exists.
    showmap_cmd = [constants.RBD_CMD, "showmapped", "-p", pool]
    result = utils.RunCmd(showmap_cmd)
    if result.failed:
      _ThrowError("rbd showmapped failed (%s): %s",
                  result.fail_reason, result.output)

    rbd_dev = self._ParseRbdShowmappedOutput(result.output, name)

    if rbd_dev:
      # The mapping exists. Return it.
      return rbd_dev

    # The mapping doesn't exist. Create it.
    map_cmd = [constants.RBD_CMD, "map", "-p", pool, name]
    result = utils.RunCmd(map_cmd)
    if result.failed:
      _ThrowError("rbd map failed (%s): %s",
                  result.fail_reason, result.output)

    # Find the corresponding rbd device.
    # "rbd map" does not report the device path, so re-run showmapped
    # and look the volume up again.
    showmap_cmd = [constants.RBD_CMD, "showmapped", "-p", pool]
    result = utils.RunCmd(showmap_cmd)
    if result.failed:
      _ThrowError("rbd map succeeded, but showmapped failed (%s): %s",
                  result.fail_reason, result.output)

    rbd_dev = self._ParseRbdShowmappedOutput(result.output, name)

    if not rbd_dev:
      _ThrowError("rbd map succeeded, but could not find the rbd block"
                  " device in output of showmapped, for volume: %s", name)

    # The device was successfully mapped. Return it.
    return rbd_dev

  @staticmethod
  def _ParseRbdShowmappedOutput(output, volume_name):
    """Parse the output of `rbd showmapped'.

    This method parses the output of `rbd showmapped' and returns
    the rbd block device path (e.g. /dev/rbd0) that matches the
    given rbd volume.

    NOTE(review): the parsing assumes a fixed 5-field, tab-separated
    layout; if the rbd tool's output format changes this raises instead
    of silently misparsing.

    @type output: string
    @param output: the whole output of `rbd showmapped'
    @type volume_name: string
    @param volume_name: the name of the volume whose device we search for
    @rtype: string or None
    @return: block device path if the volume is mapped, else None

    """
    allfields = 5
    volumefield = 2
    devicefield = 4

    field_sep = "\t"

    lines = output.splitlines()
    # Python 2: map() returns a list, so indexing/len() below are valid
    splitted_lines = map(lambda l: l.split(field_sep), lines)

    # Check empty output.
    if not splitted_lines:
      _ThrowError("rbd showmapped returned empty output")

    # Check showmapped header line, to determine number of fields.
    field_cnt = len(splitted_lines[0])
    if field_cnt != allfields:
      _ThrowError("Cannot parse rbd showmapped output because its format"
                  " seems to have changed; expected %s fields, found %s",
                  allfields, field_cnt)

    matched_lines = \
      filter(lambda l: len(l) == allfields and l[volumefield] == volume_name,
             splitted_lines)

    if len(matched_lines) > 1:
      _ThrowError("The rbd volume %s is mapped more than once."
                  " This shouldn't happen, try to unmap the extra"
                  " devices manually.", volume_name)

    if matched_lines:
      # rbd block device found. Return it.
      rbd_dev = matched_lines[0][devicefield]
      return rbd_dev

    # The given volume is not mapped.
    return None

  def Assemble(self):
    """Assemble the device.

    This is a noop; mapping happens in Attach().

    """
    pass

  def Shutdown(self):
    """Shutdown the device.

    Unmaps the volume from its block device; the Image itself is kept.

    """
    if not self.minor and not self.Attach():
      # The rbd device doesn't exist.
      return

    # Unmap the block device from the Volume.
    self._UnmapVolumeFromBlockdev(self.unique_id)

    self.minor = None
    self.dev_path = None

  def _UnmapVolumeFromBlockdev(self, unique_id):
    """Unmaps the rbd device from the Volume it is mapped.

    Unmaps the rbd device from the Volume it was previously mapped to.
    This method should be idempotent if the Volume isn't mapped.

    """
    pool = self.params[constants.LDP_POOL]
    name = unique_id[1]

    # Check if the mapping already exists.
    showmap_cmd = [constants.RBD_CMD, "showmapped", "-p", pool]
    result = utils.RunCmd(showmap_cmd)
    if result.failed:
      _ThrowError("rbd showmapped failed [during unmap](%s): %s",
                  result.fail_reason, result.output)

    rbd_dev = self._ParseRbdShowmappedOutput(result.output, name)

    if rbd_dev:
      # The mapping exists. Unmap the rbd device.
      unmap_cmd = [constants.RBD_CMD, "unmap", "%s" % rbd_dev]
      result = utils.RunCmd(unmap_cmd)
      if result.failed:
        _ThrowError("rbd unmap failed (%s): %s",
                    result.fail_reason, result.output)

  def Open(self, force=False):
    """Make the device ready for I/O.

    This is a noop; the mapped device is directly usable.

    """
    pass

  def Close(self):
    """Notifies that the device will no longer be used for I/O.

    This is a noop.

    """
    pass

  def Grow(self, amount, dryrun, backingstore):
    """Grow the Volume.

    @type amount: integer
    @param amount: the amount (in mebibytes) to grow with
    @type dryrun: boolean
    @param dryrun: whether to execute the operation in simulation mode
        only, without actually increasing the size
    @type backingstore: boolean
    @param backingstore: whether the backing store should be grown; for
        rbd only backing-store growth is meaningful, so anything else is
        a noop

    """
    if not backingstore:
      return
    if not self.Attach():
      _ThrowError("Can't attach to rbd device during Grow()")

    if dryrun:
      # the rbd tool does not support dry runs of resize operations.
      # Since rbd volumes are thinly provisioned, we assume
      # there is always enough free space for the operation.
      return

    rbd_pool = self.params[constants.LDP_POOL]
    rbd_name = self.unique_id[1]
    new_size = self.size + amount

    # Resize the rbd volume (Image) inside the RADOS cluster.
    cmd = [constants.RBD_CMD, "resize", "-p", rbd_pool,
           rbd_name, "--size", "%s" % new_size]
    result = utils.RunCmd(cmd)
    if result.failed:
      _ThrowError("rbd resize failed (%s): %s",
                  result.fail_reason, result.output)


class ExtStorageDevice(BlockDev):
  """A block device provided by an ExtStorage Provider.

  This class implements the External Storage Interface, which means
  handling of the externally provided block devices.

  Every operation is delegated to the provider's scripts via
  L{_ExtStorageAction}.

  """
  def __init__(self, unique_id, children, size, params):
    """Attaches to an extstorage block device.

    @param unique_id: two-element tuple/list of (driver, volume name)
    @raise ValueError: if unique_id is not a two-element tuple or list

    """
    super(ExtStorageDevice, self).__init__(unique_id, children, size, params)
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
      raise ValueError("Invalid configuration data %s" % str(unique_id))

    self.driver, self.vol_name = unique_id
    # the full params dict is forwarded to the provider scripts
    self.ext_params = params

    # major/minor are filled in by Attach() on success
    self.major = self.minor = None
    self.Attach()

  @classmethod
  def Create(cls, unique_id, children, size, params, excl_stor):
    """Create a new extstorage device.

    Provision a new volume using an extstorage provider, which will
    then be mapped to a block device.

    @raise errors.ProgrammerError: on invalid unique_id or if
        exclusive_storage is requested (unsupported for extstorage)

    """
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
      raise errors.ProgrammerError("Invalid configuration data %s" %
                                   str(unique_id))
    if excl_stor:
      raise errors.ProgrammerError("extstorage device requested with"
                                   " exclusive_storage")

    # Call the External Storage's create script,
    # to provision a new Volume inside the External Storage
    _ExtStorageAction(constants.ES_ACTION_CREATE, unique_id,
                      params, str(size))

    return ExtStorageDevice(unique_id, children, size, params)

  def Remove(self):
    """Remove the extstorage device.

    Detaches the device first (via Shutdown), then asks the provider to
    delete the Volume.

    """
    if not self.minor and not self.Attach():
      # The extstorage device doesn't exist.
      return

    # First shutdown the device (remove mappings).
    self.Shutdown()

    # Call the External Storage's remove script,
    # to remove the Volume from the External Storage
    _ExtStorageAction(constants.ES_ACTION_REMOVE, self.unique_id,
                      self.ext_params)

  def Rename(self, new_id):
    """Rename this device.

    Renaming is currently a noop for extstorage devices.

    """
    pass

  def Attach(self):
    """Attach to an existing extstorage device.

    This method maps the extstorage volume that matches our name with
    a corresponding block device and then attaches to this device.

    @rtype: boolean
    @return: True on success, False if the attached path is missing or
        is not a block device

    """
    self.attached = False

    # Call the External Storage's attach script,
    # to attach an existing Volume to a block device under /dev
    self.dev_path = _ExtStorageAction(constants.ES_ACTION_ATTACH,
                                      self.unique_id, self.ext_params)

    try:
      st = os.stat(self.dev_path)
    except OSError, err:
      logging.error("Error stat()'ing %s: %s", self.dev_path, str(err))
      return False

    if not stat.S_ISBLK(st.st_mode):
      logging.error("%s is not a block device", self.dev_path)
      return False

    self.major = os.major(st.st_rdev)
    self.minor = os.minor(st.st_rdev)
    self.attached = True

    return True

  def Assemble(self):
    """Assemble the device.

    This is a noop; mapping happens in Attach().

    """
    pass

  def Shutdown(self):
    """Shutdown the device.

    Detaches the Volume from its block device; the Volume itself is kept.

    """
    if not self.minor and not self.Attach():
      # The extstorage device doesn't exist.
      return

    # Call the External Storage's detach script,
    # to detach an existing Volume from it's block device under /dev
    _ExtStorageAction(constants.ES_ACTION_DETACH, self.unique_id,
                      self.ext_params)

    self.minor = None
    self.dev_path = None

  def Open(self, force=False):
    """Make the device ready for I/O.

    This is a noop; the attached device is directly usable.

    """
    pass

  def Close(self):
    """Notifies that the device will no longer be used for I/O.

    This is a noop.

    """
    pass

  def Grow(self, amount, dryrun, backingstore):
    """Grow the Volume.

    @type amount: integer
    @param amount: the amount (in mebibytes) to grow with
    @type dryrun: boolean
    @param dryrun: whether to execute the operation in simulation mode
        only, without actually increasing the size
    @type backingstore: boolean
    @param backingstore: whether the backing store should be grown; for
        extstorage only backing-store growth is meaningful, so anything
        else is a noop

    """
    if not backingstore:
      return
    if not self.Attach():
      _ThrowError("Can't attach to extstorage device during Grow()")

    if dryrun:
      # we do not support dry runs of resize operations for now.
      return

    new_size = self.size + amount

    # Call the External Storage's grow script,
    # to grow an existing Volume inside the External Storage
    _ExtStorageAction(constants.ES_ACTION_GROW, self.unique_id,
                      self.ext_params, str(self.size), grow=str(new_size))

  def SetInfo(self, text):
    """Update metadata with info text.

    The text is sanitized to the provider-safe character set and
    truncated before being handed to the provider's setinfo script.

    @type text: string
    @param text: the metadata info to set

    """
    # Replace invalid characters
    text = re.sub("^[^A-Za-z0-9_+.]", "_", text)
    text = re.sub("[^-A-Za-z0-9_+.]", "_", text)

    # Only up to 128 characters are allowed
    text = text[:128]

    # Call the External Storage's setinfo script,
    # to set metadata for an existing Volume inside the External Storage
    _ExtStorageAction(constants.ES_ACTION_SETINFO, self.unique_id,
                      self.ext_params, metadata=text)


def _ExtStorageAction(action, unique_id, ext_params,
                      size=None, grow=None, metadata=None):
  """Take an External Storage action.

  Take an External Storage action concerning or affecting
  a specific Volume inside the External Storage.

  @type action: string
  @param action: which action to perform. One of:
                 create / remove / grow / attach / detach
  @type unique_id: tuple (driver, vol_name)
  @param unique_id: a tuple containing the type of ExtStorage (driver)
                    and the Volume name
  @type ext_params: dict
  @param ext_params: ExtStorage parameters
  @type size: integer
  @param size: the size of the Volume in mebibytes
  @type grow: integer
  @param grow: the new size in mebibytes (after grow)
  @type metadata: string
  @param metadata: metadata info of the Volume, for use by the provider
  @rtype: None or a block device path (during attach)

  """
  driver, vol_name = unique_id

  # Create an External Storage instance of type `driver'
  status, inst_es = ExtStorageFromDisk(driver)
  if not status:
    _ThrowError("%s" % inst_es)

  # Create the basic environment for the driver's scripts
  create_env = _ExtStorageEnvironment(unique_id, ext_params, size,
                                      grow, metadata)

  # Do not use log file for action `attach' as we need
  # to get the output from RunResult
  # TODO: find a way to have a log file for attach too
  logfile = None
  # Compare with "!=", not "is not": string identity is an interpreter
  # implementation detail and must not be relied upon
  if action != constants.ES_ACTION_ATTACH:
    logfile = _VolumeLogName(action, driver, vol_name)

  # Make sure the given action results in a valid script
  if action not in constants.ES_SCRIPTS:
    _ThrowError("Action '%s' doesn't result in a valid ExtStorage script" %
                action)

  # Find out which external script to run according the given action
  script_name = action + "_script"
  script = getattr(inst_es, script_name)

  # Run the external script
  result = utils.RunCmd([script], env=create_env,
                        cwd=inst_es.path, output=logfile)
  if result.failed:
    logging.error("External storage's %s command '%s' returned"
                  " error: %s, logfile: %s, output: %s",
                  action, result.cmd, result.fail_reason,
                  logfile, result.output)

    # If logfile is 'None' (during attach), it breaks TailFile
    # TODO: have a log file for attach too
    if action != constants.ES_ACTION_ATTACH:
      lines = [utils.SafeEncode(val)
               for val in utils.TailFile(logfile, lines=20)]
    else:
      # result.output is a single string; take its last 20 *lines* (the
      # previous "output[-20:]" slice took the last 20 characters and
      # joined them one character per line)
      lines = result.output.splitlines()[-20:]

    _ThrowError("External storage's %s script failed (%s), last"
                " lines of output:\n%s",
                action, result.fail_reason, "\n".join(lines))

  if action == constants.ES_ACTION_ATTACH:
    return result.stdout


def ExtStorageFromDisk(name, base_dir=None):
  """Create an ExtStorage instance from disk.

  This function will return an ExtStorage instance
  if the given name is a valid ExtStorage name.

  @type name: string
  @param name: the name of the ExtStorage provider
  @type base_dir: string
  @keyword base_dir: Base directory containing ExtStorage installations.
                     Defaults to a search in all the ES_SEARCH_PATH dirs.
  @rtype: tuple
  @return: True and the ExtStorage instance if we find a valid one, or
      False and the diagnose message on error

  """
  if base_dir is None:
    es_base_dir = pathutils.ES_SEARCH_PATH
  else:
    es_base_dir = [base_dir]

  es_dir = utils.FindFile(name, es_base_dir, os.path.isdir)

  if es_dir is None:
    return False, ("Directory for External Storage Provider %s not"
                   " found in search path" % name)

  # ES Files dictionary, we will populate it with the absolute path
  # names; if the value is True, then it is a required file, otherwise
  # an optional one
  es_files = dict.fromkeys(constants.ES_SCRIPTS, True)

  es_files[constants.ES_PARAMETERS_FILE] = True

  # NOTE: iterating .items() while reassigning values is safe here --
  # under Python 2 .items() returns a snapshot list and no keys are
  # added or removed, only values replaced with absolute paths
  for (filename, _) in es_files.items():
    es_files[filename] = utils.PathJoin(es_dir, filename)

    try:
      st = os.stat(es_files[filename])
    except EnvironmentError, err:
      return False, ("File '%s' under path '%s' is missing (%s)" %
                     (filename, es_dir, utils.ErrnoOrStr(err)))

    if not stat.S_ISREG(stat.S_IFMT(st.st_mode)):
      return False, ("File '%s' under path '%s' is not a regular file" %
                     (filename, es_dir))

    # scripts must be executable by their owner
    if filename in constants.ES_SCRIPTS:
      if stat.S_IMODE(st.st_mode) & stat.S_IXUSR != stat.S_IXUSR:
        return False, ("File '%s' under path '%s' is not executable" %
                       (filename, es_dir))

  # Parse the parameters file: one parameter per line, as
  # "name description" pairs (split on the first whitespace run)
  parameters = []
  if constants.ES_PARAMETERS_FILE in es_files:
    parameters_file = es_files[constants.ES_PARAMETERS_FILE]
    try:
      parameters = utils.ReadFile(parameters_file).splitlines()
    except EnvironmentError, err:
      return False, ("Error while reading the EXT parameters file at %s: %s" %
                     (parameters_file, utils.ErrnoOrStr(err)))
    parameters = [v.split(None, 1) for v in parameters]

  es_obj = \
    objects.ExtStorage(name=name, path=es_dir,
                       create_script=es_files[constants.ES_SCRIPT_CREATE],
                       remove_script=es_files[constants.ES_SCRIPT_REMOVE],
                       grow_script=es_files[constants.ES_SCRIPT_GROW],
                       attach_script=es_files[constants.ES_SCRIPT_ATTACH],
                       detach_script=es_files[constants.ES_SCRIPT_DETACH],
                       setinfo_script=es_files[constants.ES_SCRIPT_SETINFO],
                       verify_script=es_files[constants.ES_SCRIPT_VERIFY],
                       supported_parameters=parameters)
  return True, es_obj


def _ExtStorageEnvironment(unique_id, ext_params,
3187
                           size=None, grow=None, metadata=None):
3188
  """Calculate the environment for an External Storage script.
3189

3190
  @type unique_id: tuple (driver, vol_name)
3191
  @param unique_id: ExtStorage pool and name of the Volume
3192
  @type ext_params: dict
3193
  @param ext_params: the EXT parameters
3194
  @type size: string
3195
  @param size: size of the Volume (in mebibytes)
3196
  @type grow: string
3197
  @param grow: new size of Volume after grow (in mebibytes)
3198
  @type metadata: string
3199
  @param metadata: metadata info of the Volume
3200
  @rtype: dict
3201
  @return: dict of environment variables
3202

3203
  """
3204
  vol_name = unique_id[1]
3205

    
3206
  result = {}
3207
  result["VOL_NAME"] = vol_name
3208

    
3209
  # EXT params
3210
  for pname, pvalue in ext_params.items():
3211
    result["EXTP_%s" % pname.upper()] = str(pvalue)
3212

    
3213
  if size is not None:
3214
    result["VOL_SIZE"] = size
3215

    
3216
  if grow is not None:
3217
    result["VOL_NEW_SIZE"] = grow
3218

    
3219
  if metadata is not None:
3220
    result["VOL_METADATA"] = metadata
3221

    
3222
  return result
3223

    
3224

    
3225
def _VolumeLogName(kind, es_name, volume):
  """Compute the ExtStorage log filename for a given Volume and operation.

  @type kind: string
  @param kind: the operation type (e.g. create, remove etc.)
  @type es_name: string
  @param es_name: the ExtStorage name
  @type volume: string
  @param volume: the name of the Volume inside the External Storage
  @rtype: string
  @return: the full path of the log file

  """
  # Fail early if the extstorage log directory is not a valid directory
  if not os.path.isdir(pathutils.LOG_ES_DIR):
    _ThrowError("Cannot find log directory: %s", pathutils.LOG_ES_DIR)

  # TODO: Use tempfile.mkstemp to create unique filename
  basename = ("%s-%s-%s-%s.log" %
              (kind, es_name, volume, utils.TimestampForFilename()))
  return utils.PathJoin(pathutils.LOG_ES_DIR, basename)


# Map from logical disk types (constants.LD_*) to the classes that
# implement them; used by the factory functions below (FindDevice,
# Assemble, Create) to instantiate the right device class.
DEV_MAP = {
  constants.LD_LV: LogicalVolume,
  constants.LD_DRBD8: DRBD8,
  constants.LD_BLOCKDEV: PersistentBlockDevice,
  constants.LD_RBD: RADOSBlockDevice,
  constants.LD_EXT: ExtStorageDevice,
  }

# File-based storage is only registered when enabled at build time
if constants.ENABLE_FILE_STORAGE or constants.ENABLE_SHARED_FILE_STORAGE:
  DEV_MAP[constants.LD_FILE] = FileStorage


def _VerifyDiskType(dev_type):
  """Check that a disk type is known to this module.

  @param dev_type: a logical disk type (one of the DEV_MAP keys)
  @raise errors.ProgrammerError: if the type is not in DEV_MAP

  """
  if dev_type in DEV_MAP:
    return
  raise errors.ProgrammerError("Invalid block device type '%s'" % dev_type)


def _VerifyDiskParams(disk):
  """Verifies if all disk parameters are set.

  @type disk: L{objects.Disk}
  @param disk: the disk whose parameters are checked
  @raise errors.ProgrammerError: if any expected parameter is missing

  """
  required = set(constants.DISK_LD_DEFAULTS[disk.dev_type])
  provided = set(disk.params)
  missing = required - provided
  if missing:
    raise errors.ProgrammerError("Block device is missing disk parameters: %s" %
                                 missing)


def FindDevice(disk, children):
  """Search for an existing, assembled device.

  This will succeed only if the device exists and is assembled, but it
  does not do any actions in order to activate the device.

  @type disk: L{objects.Disk}
  @param disk: the disk object to find
  @type children: list of L{bdev.BlockDev}
  @param children: the list of block devices that are children of the device
                  represented by the disk parameter
  @rtype: L{bdev.BlockDev} or None
  @return: the device instance if it attached itself, else None

  """
  _VerifyDiskType(disk.dev_type)
  dev_class = DEV_MAP[disk.dev_type]
  device = dev_class(disk.physical_id, children, disk.size, disk.params)
  if device.attached:
    return device
  return None


def Assemble(disk, children):
  """Try to attach or assemble an existing device.

  This will attach to assemble the device, as needed, to bring it
  fully up. It must be safe to run on already-assembled devices.

  @type disk: L{objects.Disk}
  @param disk: the disk object to assemble
  @type children: list of L{bdev.BlockDev}
  @param children: the list of block devices that are children of the device
                  represented by the disk parameter
  @rtype: L{bdev.BlockDev}
  @return: the assembled device instance

  """
  _VerifyDiskType(disk.dev_type)
  _VerifyDiskParams(disk)
  dev_class = DEV_MAP[disk.dev_type]
  device = dev_class(disk.physical_id, children, disk.size, disk.params)
  device.Assemble()
  return device


def Create(disk, children, excl_stor):
  """Create a device.

  @type disk: L{objects.Disk}
  @param disk: the disk object to create
  @type children: list of L{bdev.BlockDev}
  @param children: the list of block devices that are children of the device
                  represented by the disk parameter
  @type excl_stor: boolean
  @param excl_stor: Whether exclusive_storage is active
  @rtype: L{bdev.BlockDev}
  @return: the newly created device instance

  """
  _VerifyDiskType(disk.dev_type)
  _VerifyDiskParams(disk)
  dev_class = DEV_MAP[disk.dev_type]
  return dev_class.Create(disk.physical_id, children, disk.size,
                          disk.params, excl_stor)