1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2010, 2011, 2012 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Block device abstraction"""
23

    
24
import re
25
import time
26
import errno
27
import shlex
28
import stat
29
import pyparsing as pyp
30
import os
31
import logging
32
import math
33

    
34
from ganeti import utils
35
from ganeti import errors
36
from ganeti import constants
37
from ganeti import objects
38
from ganeti import compat
39
from ganeti import netutils
40
from ganeti import pathutils
41

    
42

    
43
# Size of reads in _CanReadDevice
44
_DEVICE_READ_SIZE = 128 * 1024
45

    
46

    
47
def _IgnoreError(fn, *args, **kwargs):
48
  """Executes the given function, ignoring BlockDeviceErrors.
49

50
  This is used in order to simplify the execution of cleanup or
51
  rollback functions.
52

53
  @rtype: boolean
54
  @return: True when fn didn't raise an exception, False otherwise
55

56
  """
57
  try:
58
    fn(*args, **kwargs)
59
    return True
60
  except errors.BlockDeviceError, err:
61
    logging.warning("Caught BlockDeviceError but ignoring: %s", str(err))
62
    return False
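
# A minimal usage sketch (illustrative only; nothing in this module calls it
# this way): _IgnoreError is meant for best-effort cleanup, e.g. rolling back
# a half-created volume while letting the original error propagate.
#
#   new_lv = LogicalVolume.Create(unique_id, [], size, params, excl_stor)
#   try:
#     new_lv.Assemble()
#   except errors.BlockDeviceError:
#     _IgnoreError(new_lv.Remove)  # failure here is only logged
#     raise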
63

    
64

    
65
def _ThrowError(msg, *args):
66
  """Log an error to the node daemon and the raise an exception.
67

68
  @type msg: string
69
  @param msg: the text of the exception
70
  @raise errors.BlockDeviceError
71

72
  """
73
  if args:
74
    msg = msg % args
75
  logging.error(msg)
76
  raise errors.BlockDeviceError(msg)
77

    
78

    
79
def _CheckResult(result):
80
  """Throws an error if the given result is a failed one.
81

82
  @param result: result from RunCmd
83

84
  """
85
  if result.failed:
86
    _ThrowError("Command: %s error: %s - %s", result.cmd, result.fail_reason,
87
                result.output)
88

    
89

    
90
def _CanReadDevice(path):
91
  """Check if we can read from the given device.
92

93
  This tries to read the first 128k of the device.
94

95
  """
96
  try:
97
    utils.ReadFile(path, size=_DEVICE_READ_SIZE)
98
    return True
99
  except EnvironmentError:
100
    logging.warning("Can't read from device %s", path, exc_info=True)
101
    return False
102

    
103

    
104
def _GetForbiddenFileStoragePaths():
105
  """Builds a list of path prefixes which shouldn't be used for file storage.
106

107
  @rtype: frozenset
108

109
  """
110
  paths = set([
111
    "/boot",
112
    "/dev",
113
    "/etc",
114
    "/home",
115
    "/proc",
116
    "/root",
117
    "/sys",
118
    ])
119

    
120
  for prefix in ["", "/usr", "/usr/local"]:
121
    paths.update(map(lambda s: "%s/%s" % (prefix, s),
122
                     ["bin", "lib", "lib32", "lib64", "sbin"]))
123

    
124
  return compat.UniqueFrozenset(map(os.path.normpath, paths))
125

    
126

    
127
def _ComputeWrongFileStoragePaths(paths,
128
                                  _forbidden=_GetForbiddenFileStoragePaths()):
129
  """Cross-checks a list of paths for prefixes considered bad.
130

131
  Some paths, e.g. "/bin", should not be used for file storage.
132

133
  @type paths: list
134
  @param paths: List of paths to be checked
135
  @rtype: list
136
  @return: Sorted list of paths for which the user should be warned
137

138
  """
139
  def _Check(path):
140
    return (not os.path.isabs(path) or
141
            path in _forbidden or
142
            filter(lambda p: utils.IsBelowDir(p, path), _forbidden))
143

    
144
  return utils.NiceSort(filter(_Check, map(os.path.normpath, paths)))
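
# Illustrative example (paths are hypothetical): with the default forbidden
# prefixes built above, a call such as
#
#   _ComputeWrongFileStoragePaths(["/srv/ganeti/file-storage",
#                                  "/bin/extra", "relative/path"])
#
# would flag "/bin/extra" (below the forbidden "/bin") and "relative/path"
# (not absolute), while "/srv/ganeti/file-storage" passes.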
145

    
146

    
147
def ComputeWrongFileStoragePaths(_filename=pathutils.FILE_STORAGE_PATHS_FILE):
148
  """Returns a list of file storage paths whose prefix is considered bad.
149

150
  See L{_ComputeWrongFileStoragePaths}.
151

152
  """
153
  return _ComputeWrongFileStoragePaths(_LoadAllowedFileStoragePaths(_filename))
154

    
155

    
156
def _CheckFileStoragePath(path, allowed):
157
  """Checks if a path is in a list of allowed paths for file storage.
158

159
  @type path: string
160
  @param path: Path to check
161
  @type allowed: list
162
  @param allowed: List of allowed paths
163
  @raise errors.FileStoragePathError: If the path is not allowed
164

165
  """
166
  if not os.path.isabs(path):
167
    raise errors.FileStoragePathError("File storage path must be absolute,"
168
                                      " got '%s'" % path)
169

    
170
  for i in allowed:
171
    if not os.path.isabs(i):
172
      logging.info("Ignoring relative path '%s' for file storage", i)
173
      continue
174

    
175
    if utils.IsBelowDir(i, path):
176
      break
177
  else:
178
    raise errors.FileStoragePathError("Path '%s' is not acceptable for file"
179
                                      " storage" % path)
180

    
181

    
182
def _LoadAllowedFileStoragePaths(filename):
183
  """Loads file containing allowed file storage paths.
184

185
  @rtype: list
186
  @return: List of allowed paths (can be an empty list)
187

188
  """
189
  try:
190
    contents = utils.ReadFile(filename)
191
  except EnvironmentError:
192
    return []
193
  else:
194
    return utils.FilterEmptyLinesAndComments(contents)
195

    
196

    
197
def CheckFileStoragePath(path, _filename=pathutils.FILE_STORAGE_PATHS_FILE):
198
  """Checks if a path is allowed for file storage.
199

200
  @type path: string
201
  @param path: Path to check
202
  @raise errors.FileStoragePathError: If the path is not allowed
203

204
  """
205
  allowed = _LoadAllowedFileStoragePaths(_filename)
206

    
207
  if _ComputeWrongFileStoragePaths([path]):
208
    raise errors.FileStoragePathError("Path '%s' uses a forbidden prefix" %
209
                                      path)
210

    
211
  _CheckFileStoragePath(path, allowed)
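
# Usage sketch (illustrative; the allowed-paths file contents are assumed):
# if pathutils.FILE_STORAGE_PATHS_FILE lists "/srv/ganeti/file-storage", then
#
#   CheckFileStoragePath("/srv/ganeti/file-storage/inst1.img")
#
# returns silently, whereas "/etc/something" raises
# errors.FileStoragePathError (forbidden prefix, and not below an allowed
# directory).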
212

    
213

    
214
class BlockDev(object):
215
  """Block device abstract class.
216

217
  A block device can be in the following states:
218
    - not existing on the system, and by `Create()` it goes into:
219
    - existing but not setup/not active, and by `Assemble()` goes into:
220
    - active read-write and by `Open()` it goes into:
    - online (=used, or ready for use)

  A device can also be online but read-only; however, we are not using
  the read-only state (LV has it, if needed in the future) and we
  usually look at this as a stack, so it's easier to conceptualise
  the transition from not-existing to online and back as a linear one.
228

229
  The many different states of the device are due to the fact that we
230
  need to cover many device types:
231
    - logical volumes are created, lvchange -a y $lv, and used
232
    - drbd devices are attached to a local disk/remote peer and made primary
233

234
  A block device is identified by three items:
235
    - the /dev path of the device (dynamic)
236
    - a unique ID of the device (static)
237
    - it's major/minor pair (dynamic)
238

239
  Not all devices implement both the first two as distinct items. LVM
240
  logical volumes have their unique ID (the pair volume group, logical
241
  volume name) in a 1-to-1 relation to the dev path. For DRBD devices,
242
  the /dev path is again dynamic and the unique id is the pair (host1,
243
  dev1), (host2, dev2).
244

245
  You can get to a device in two ways:
246
    - creating the (real) device, which returns you
247
      an attached instance (lvcreate)
248
    - attaching of a python instance to an existing (real) device
249

250
  The second point, the attachment to a device, is different
251
  depending on whether the device is assembled or not. At init() time,
252
  we search for a device with the same unique_id as us. If found,
253
  good. It also means that the device is already assembled. If not,
254
  after assembly we'll have our correct major/minor.
255

256
  """
257
  def __init__(self, unique_id, children, size, params):
258
    self._children = children
259
    self.dev_path = None
260
    self.unique_id = unique_id
261
    self.major = None
262
    self.minor = None
263
    self.attached = False
264
    self.size = size
265
    self.params = params
266

    
267
  def Assemble(self):
268
    """Assemble the device from its components.
269

270
    Implementations of this method by child classes must ensure that:
271
      - after the device has been assembled, it knows its major/minor
272
        numbers; this allows other devices (usually parents) to probe
273
        correctly for their children
274
      - calling this method on an existing, in-use device is safe
275
      - if the device is already configured (and in an OK state),
276
        this method is idempotent
277

278
    """
279
    pass
280

    
281
  def Attach(self):
282
    """Find a device which matches our config and attach to it.
283

284
    """
285
    raise NotImplementedError
286

    
287
  def Close(self):
288
    """Notifies that the device will no longer be used for I/O.
289

290
    """
291
    raise NotImplementedError
292

    
293
  @classmethod
294
  def Create(cls, unique_id, children, size, params, excl_stor):
295
    """Create the device.
296

297
    If the device cannot be created, it will return None
298
    instead. Error messages go to the logging system.
299

300
    Note that for some devices, the unique_id is used, and for other,
301
    the children. The idea is that these two, taken together, are
302
    enough for both creation and assembly (later).
303

304
    """
305
    raise NotImplementedError
306

    
307
  def Remove(self):
308
    """Remove this device.
309

310
    This makes sense only for some of the device types: LV and file
311
    storage. Also note that if the device can't attach, the removal
312
    can't be completed.
313

314
    """
315
    raise NotImplementedError
316

    
317
  def Rename(self, new_id):
318
    """Rename this device.
319

320
    This may or may not make sense for a given device type.
321

322
    """
323
    raise NotImplementedError
324

    
325
  def Open(self, force=False):
326
    """Make the device ready for use.
327

328
    This makes the device ready for I/O. For now, just the DRBD
329
    devices need this.
330

331
    The force parameter signifies that if the device has any kind of
332
    --force option, it should be used; we know what we are doing.
333

334
    """
335
    raise NotImplementedError
336

    
337
  def Shutdown(self):
338
    """Shut down the device, freeing its children.
339

340
    This undoes the `Assemble()` work, except for the child
341
    assembling; as such, the children on the device are still
342
    assembled after this call.
343

344
    """
345
    raise NotImplementedError
346

    
347
  def SetSyncParams(self, params):
348
    """Adjust the synchronization parameters of the mirror.
349

350
    In case this is not a mirroring device, this is a no-op.
351

352
    @param params: dictionary of LD level disk parameters related to the
353
    synchronization.
354
    @rtype: list
355
    @return: a list of error messages, emitted both by the current node and by
356
    children. An empty list means no errors.
357

358
    """
359
    result = []
360
    if self._children:
361
      for child in self._children:
362
        result.extend(child.SetSyncParams(params))
363
    return result
364

    
365
  def PauseResumeSync(self, pause):
366
    """Pause/Resume the sync of the mirror.
367

368
    In case this is not a mirroring device, this is a no-op.
369

370
    @param pause: Whether to pause or resume
371

372
    """
373
    result = True
374
    if self._children:
375
      for child in self._children:
376
        result = result and child.PauseResumeSync(pause)
377
    return result
378

    
379
  def GetSyncStatus(self):
380
    """Returns the sync status of the device.
381

382
    If this device is a mirroring device, this function returns the
383
    status of the mirror.
384

385
    If sync_percent is None, it means the device is not syncing.
386

387
    If estimated_time is None, it means we can't estimate
388
    the time needed, otherwise it's the time left in seconds.
389

390
    If is_degraded is True, it means the device is missing
391
    redundancy. This is usually a sign that something went wrong in
392
    the device setup, if sync_percent is None.
393

394
    The ldisk parameter represents the degradation of the local
395
    data. This is only valid for some devices, the rest will always
396
    return False (not degraded).
397

398
    @rtype: objects.BlockDevStatus
399

400
    """
401
    return objects.BlockDevStatus(dev_path=self.dev_path,
402
                                  major=self.major,
403
                                  minor=self.minor,
404
                                  sync_percent=None,
405
                                  estimated_time=None,
406
                                  is_degraded=False,
407
                                  ldisk_status=constants.LDS_OKAY)
408

    
409
  def CombinedSyncStatus(self):
410
    """Calculate the mirror status recursively for our children.
411

412
    The return value is the same as for `GetSyncStatus()` except the
413
    minimum percent and maximum time are calculated across our
414
    children.
415

416
    @rtype: objects.BlockDevStatus
417

418
    """
419
    status = self.GetSyncStatus()
420

    
421
    min_percent = status.sync_percent
422
    max_time = status.estimated_time
423
    is_degraded = status.is_degraded
424
    ldisk_status = status.ldisk_status
425

    
426
    if self._children:
427
      for child in self._children:
428
        child_status = child.GetSyncStatus()
429

    
430
        if min_percent is None:
431
          min_percent = child_status.sync_percent
432
        elif child_status.sync_percent is not None:
433
          min_percent = min(min_percent, child_status.sync_percent)
434

    
435
        if max_time is None:
436
          max_time = child_status.estimated_time
437
        elif child_status.estimated_time is not None:
438
          max_time = max(max_time, child_status.estimated_time)
439

    
440
        is_degraded = is_degraded or child_status.is_degraded
441

    
442
        if ldisk_status is None:
443
          ldisk_status = child_status.ldisk_status
444
        elif child_status.ldisk_status is not None:
445
          ldisk_status = max(ldisk_status, child_status.ldisk_status)
446

    
447
    return objects.BlockDevStatus(dev_path=self.dev_path,
448
                                  major=self.major,
449
                                  minor=self.minor,
450
                                  sync_percent=min_percent,
451
                                  estimated_time=max_time,
452
                                  is_degraded=is_degraded,
453
                                  ldisk_status=ldisk_status)
454

    
455
  def SetInfo(self, text):
456
    """Update metadata with info text.
457

458
    Only supported for some device types.
459

460
    """
461
    for child in self._children:
462
      child.SetInfo(text)
463

    
464
  def Grow(self, amount, dryrun, backingstore):
465
    """Grow the block device.
466

467
    @type amount: integer
468
    @param amount: the amount (in mebibytes) to grow with
469
    @type dryrun: boolean
470
    @param dryrun: whether to execute the operation in simulation mode
471
        only, without actually increasing the size
472
    @param backingstore: whether to execute the operation on backing storage
473
        only, or on "logical" storage only; e.g. DRBD is logical storage,
474
        whereas LVM, file, RBD are backing storage
475

476
    """
477
    raise NotImplementedError
478

    
479
  def GetActualSize(self):
480
    """Return the actual disk size.
481

482
    @note: the device needs to be active when this is called
483

484
    """
485
    assert self.attached, "BlockDevice not attached in GetActualSize()"
486
    result = utils.RunCmd(["blockdev", "--getsize64", self.dev_path])
487
    if result.failed:
488
      _ThrowError("blockdev failed (%s): %s",
489
                  result.fail_reason, result.output)
490
    try:
491
      sz = int(result.output.strip())
492
    except (ValueError, TypeError), err:
493
      _ThrowError("Failed to parse blockdev output: %s", str(err))
494
    return sz
495

    
496
  def __repr__(self):
497
    return ("<%s: unique_id: %s, children: %s, %s:%s, %s>" %
498
            (self.__class__, self.unique_id, self._children,
499
             self.major, self.minor, self.dev_path))
500

    
501

    
502
class LogicalVolume(BlockDev):
503
  """Logical Volume block device.
504

505
  """
506
  _VALID_NAME_RE = re.compile("^[a-zA-Z0-9+_.-]*$")
507
  _INVALID_NAMES = compat.UniqueFrozenset([".", "..", "snapshot", "pvmove"])
508
  _INVALID_SUBSTRINGS = compat.UniqueFrozenset(["_mlog", "_mimage"])
509

    
510
  def __init__(self, unique_id, children, size, params):
511
    """Attaches to a LV device.
512

513
    The unique_id is a tuple (vg_name, lv_name)
514

515
    """
516
    super(LogicalVolume, self).__init__(unique_id, children, size, params)
517
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
518
      raise ValueError("Invalid configuration data %s" % str(unique_id))
519
    self._vg_name, self._lv_name = unique_id
520
    self._ValidateName(self._vg_name)
521
    self._ValidateName(self._lv_name)
522
    self.dev_path = utils.PathJoin("/dev", self._vg_name, self._lv_name)
523
    self._degraded = True
524
    self.major = self.minor = self.pe_size = self.stripe_count = None
525
    self.Attach()
526

    
527
  @staticmethod
528
  def _GetStdPvSize(pvs_info):
529
    """Return the the standard PV size (used with exclusive storage).
530

531
    @param pvs_info: list of objects.LvmPvInfo, cannot be empty
532
    @rtype: float
533
    @return: size in MiB
534

535
    """
536
    assert len(pvs_info) > 0
537
    smallest = min([pv.size for pv in pvs_info])
538
    return smallest / (1 + constants.PART_MARGIN + constants.PART_RESERVED)
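
  # Worked example (the margin/reserved values below are assumptions, not the
  # real constants): with a smallest PV of 10000 MiB and
  # PART_MARGIN = PART_RESERVED = 0.01, the standard PV size would be
  # 10000 / 1.02, i.e. roughly 9804 MiB.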
539

    
540
  @staticmethod
541
  def _ComputeNumPvs(size, pvs_info):
542
    """Compute the number of PVs needed for an LV (with exclusive storage).
543

544
    @type size: float
545
    @param size: LV size in MiB
546
    @param pvs_info: list of objects.LvmPvInfo, cannot be empty
547
    @rtype: integer
548
    @return: number of PVs needed
549
    """
550
    assert len(pvs_info) > 0
551
    pv_size = float(LogicalVolume._GetStdPvSize(pvs_info))
552
    return int(math.ceil(float(size) / pv_size))
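
  # Example (illustrative numbers): if _GetStdPvSize() yields ~9804 MiB per
  # PV, an LV of 20000 MiB needs ceil(20000 / 9804) = 3 whole PVs, as
  # exclusive storage allocates entire PVs to a volume.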
553

    
554
  @staticmethod
555
  def _GetEmptyPvNames(pvs_info, max_pvs=None):
556
    """Return a list of empty PVs, by name.
557

558
    """
559
    empty_pvs = filter(objects.LvmPvInfo.IsEmpty, pvs_info)
560
    if max_pvs is not None:
561
      empty_pvs = empty_pvs[:max_pvs]
562
    return map((lambda pv: pv.name), empty_pvs)
563

    
564
  @classmethod
565
  def Create(cls, unique_id, children, size, params, excl_stor):
566
    """Create a new logical volume.
567

568
    """
569
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
570
      raise errors.ProgrammerError("Invalid configuration data %s" %
571
                                   str(unique_id))
572
    vg_name, lv_name = unique_id
573
    cls._ValidateName(vg_name)
574
    cls._ValidateName(lv_name)
575
    pvs_info = cls.GetPVInfo([vg_name])
576
    if not pvs_info:
577
      if excl_stor:
578
        msg = "No (empty) PVs found"
579
      else:
580
        msg = "Can't compute PV info for vg %s" % vg_name
581
      _ThrowError(msg)
582
    pvs_info.sort(key=(lambda pv: pv.free), reverse=True)
583

    
584
    pvlist = [pv.name for pv in pvs_info]
585
    if compat.any(":" in v for v in pvlist):
586
      _ThrowError("Some of your PVs have the invalid character ':' in their"
587
                  " name, this is not supported - please filter them out"
588
                  " in lvm.conf using either 'filter' or 'preferred_names'")
589

    
590
    current_pvs = len(pvlist)
591
    desired_stripes = params[constants.LDP_STRIPES]
592
    stripes = min(current_pvs, desired_stripes)
593

    
594
    if excl_stor:
595
      (err_msgs, _) = utils.LvmExclusiveCheckNodePvs(pvs_info)
596
      if err_msgs:
597
        for m in err_msgs:
598
          logging.warning(m)
599
      req_pvs = cls._ComputeNumPvs(size, pvs_info)
600
      pvlist = cls._GetEmptyPvNames(pvs_info, req_pvs)
601
      current_pvs = len(pvlist)
602
      if current_pvs < req_pvs:
603
        _ThrowError("Not enough empty PVs to create a disk of %d MB:"
604
                    " %d available, %d needed", size, current_pvs, req_pvs)
605
      assert current_pvs == len(pvlist)
606
      if stripes > current_pvs:
607
        # No warning issued for this, as it's no surprise
608
        stripes = current_pvs
609

    
610
    else:
611
      if stripes < desired_stripes:
612
        logging.warning("Could not use %d stripes for VG %s, as only %d PVs are"
613
                        " available.", desired_stripes, vg_name, current_pvs)
614
      free_size = sum([pv.free for pv in pvs_info])
615
      # The size constraint should have been checked from the master before
616
      # calling the create function.
617
      if free_size < size:
618
        _ThrowError("Not enough free space: required %s,"
619
                    " available %s", size, free_size)
620

    
621
    # If the free space is not well distributed, we won't be able to
622
    # create an optimally-striped volume; in that case, we want to try
623
    # with N, N-1, ..., 2, and finally 1 (non-stripped) number of
624
    # stripes
625
    cmd = ["lvcreate", "-L%dm" % size, "-n%s" % lv_name]
626
    for stripes_arg in range(stripes, 0, -1):
627
      result = utils.RunCmd(cmd + ["-i%d" % stripes_arg] + [vg_name] + pvlist)
628
      if not result.failed:
629
        break
630
    if result.failed:
631
      _ThrowError("LV create failed (%s): %s",
632
                  result.fail_reason, result.output)
633
    return LogicalVolume(unique_id, children, size, params)
634

    
635
  @staticmethod
636
  def _GetVolumeInfo(lvm_cmd, fields):
637
    """Returns LVM Volumen infos using lvm_cmd
638

639
    @param lvm_cmd: Should be one of "pvs", "vgs" or "lvs"
640
    @param fields: Fields to return
641
    @return: A list of dicts each with the parsed fields
642

643
    """
644
    if not fields:
645
      raise errors.ProgrammerError("No fields specified")
646

    
647
    sep = "|"
648
    cmd = [lvm_cmd, "--noheadings", "--nosuffix", "--units=m", "--unbuffered",
649
           "--separator=%s" % sep, "-o%s" % ",".join(fields)]
650

    
651
    result = utils.RunCmd(cmd)
652
    if result.failed:
653
      raise errors.CommandError("Can't get the volume information: %s - %s" %
654
                                (result.fail_reason, result.output))
655

    
656
    data = []
657
    for line in result.stdout.splitlines():
658
      splitted_fields = line.strip().split(sep)
659

    
660
      if len(fields) != len(splitted_fields):
661
        raise errors.CommandError("Can't parse %s output: line '%s'" %
662
                                  (lvm_cmd, line))
663

    
664
      data.append(splitted_fields)
665

    
666
    return data
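
  # Sketch of what this helper runs and parses (sample output is invented):
  #
  #   _GetVolumeInfo("pvs", ["pv_name", "vg_name", "pv_free"])
  #
  # executes roughly
  #
  #   pvs --noheadings --nosuffix --units=m --unbuffered --separator=| \
  #       -opv_name,vg_name,pv_free
  #
  # and a line such as "  /dev/sda3|xenvg|2048.00" becomes the list
  # ["/dev/sda3", "xenvg", "2048.00"].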
667

    
668
  @classmethod
669
  def GetPVInfo(cls, vg_names, filter_allocatable=True):
670
    """Get the free space info for PVs in a volume group.
671

672
    @param vg_names: list of volume group names, if empty all will be returned
673
    @param filter_allocatable: whether to skip over unallocatable PVs
674

675
    @rtype: list
676
    @return: list of objects.LvmPvInfo objects
677

678
    """
679
    try:
680
      info = cls._GetVolumeInfo("pvs", ["pv_name", "vg_name", "pv_free",
681
                                        "pv_attr", "pv_size"])
682
    except errors.GenericError, err:
683
      logging.error("Can't get PV information: %s", err)
684
      return None
685

    
686
    data = []
687
    for (pv_name, vg_name, pv_free, pv_attr, pv_size) in info:
688
      # (possibly) skip over pvs which are not allocatable
689
      if filter_allocatable and pv_attr[0] != "a":
690
        continue
691
      # (possibly) skip over pvs which are not in the right volume group(s)
692
      if vg_names and vg_name not in vg_names:
693
        continue
694
      pvi = objects.LvmPvInfo(name=pv_name, vg_name=vg_name,
695
                              size=float(pv_size), free=float(pv_free),
696
                              attributes=pv_attr)
697
      data.append(pvi)
698

    
699
    return data
700

    
701
  @classmethod
702
  def _GetExclusiveStorageVgFree(cls, vg_name):
703
    """Return the free disk space in the given VG, in exclusive storage mode.
704

705
    @type vg_name: string
706
    @param vg_name: VG name
707
    @rtype: float
708
    @return: free space in MiB
709
    """
710
    pvs_info = cls.GetPVInfo([vg_name])
711
    if not pvs_info:
712
      return 0.0
713
    pv_size = cls._GetStdPvSize(pvs_info)
714
    num_pvs = len(cls._GetEmptyPvNames(pvs_info))
715
    return pv_size * num_pvs
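
  # Example (illustrative): with 4 empty PVs and a standard PV size of
  # ~9804 MiB, the reported free space would be 4 * 9804 = 39216 MiB; free
  # space on PVs that are already (partially) used does not count here.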
716

    
717
  @classmethod
718
  def GetVGInfo(cls, vg_names, excl_stor, filter_readonly=True):
719
    """Get the free space info for specific VGs.
720

721
    @param vg_names: list of volume group names, if empty all will be returned
722
    @param excl_stor: whether exclusive_storage is enabled
723
    @param filter_readonly: whether to skip over readonly VGs
724

725
    @rtype: list
726
    @return: list of tuples (free_space, total_size, name) with free_space in
727
             MiB
728

729
    """
730
    try:
731
      info = cls._GetVolumeInfo("vgs", ["vg_name", "vg_free", "vg_attr",
732
                                        "vg_size"])
733
    except errors.GenericError, err:
734
      logging.error("Can't get VG information: %s", err)
735
      return None
736

    
737
    data = []
738
    for vg_name, vg_free, vg_attr, vg_size in info:
739
      # (possibly) skip over vgs which are not writable
740
      if filter_readonly and vg_attr[0] == "r":
741
        continue
742
      # (possibly) skip over vgs which are not in the right volume group(s)
743
      if vg_names and vg_name not in vg_names:
744
        continue
745
      # Exclusive storage needs a different concept of free space
746
      if excl_stor:
747
        es_free = cls._GetExclusiveStorageVgFree(vg_name)
748
        assert es_free <= vg_free
749
        vg_free = es_free
750
      data.append((float(vg_free), float(vg_size), vg_name))
751

    
752
    return data
753

    
754
  @classmethod
755
  def _ValidateName(cls, name):
756
    """Validates that a given name is valid as VG or LV name.
757

758
    The list of valid characters and restricted names is taken out of
759
    the lvm(8) manpage, with the simplification that we enforce both
760
    VG and LV restrictions on the names.
761

762
    """
763
    if (not cls._VALID_NAME_RE.match(name) or
764
        name in cls._INVALID_NAMES or
765
        compat.any(substring in name for substring in cls._INVALID_SUBSTRINGS)):
766
      _ThrowError("Invalid LVM name '%s'", name)
767

    
768
  def Remove(self):
769
    """Remove this logical volume.
770

771
    """
772
    if not self.minor and not self.Attach():
773
      # the LV does not exist
774
      return
775
    result = utils.RunCmd(["lvremove", "-f", "%s/%s" %
776
                           (self._vg_name, self._lv_name)])
777
    if result.failed:
778
      _ThrowError("Can't lvremove: %s - %s", result.fail_reason, result.output)
779

    
780
  def Rename(self, new_id):
781
    """Rename this logical volume.
782

783
    """
784
    if not isinstance(new_id, (tuple, list)) or len(new_id) != 2:
785
      raise errors.ProgrammerError("Invalid new logical id '%s'" % new_id)
786
    new_vg, new_name = new_id
787
    if new_vg != self._vg_name:
788
      raise errors.ProgrammerError("Can't move a logical volume across"
789
                                   " volume groups (from %s to to %s)" %
790
                                   (self._vg_name, new_vg))
791
    result = utils.RunCmd(["lvrename", new_vg, self._lv_name, new_name])
792
    if result.failed:
793
      _ThrowError("Failed to rename the logical volume: %s", result.output)
794
    self._lv_name = new_name
795
    self.dev_path = utils.PathJoin("/dev", self._vg_name, self._lv_name)
796

    
797
  def Attach(self):
798
    """Attach to an existing LV.
799

800
    This method will try to see if an existing and active LV exists
801
    which matches our name. If so, its major/minor will be
802
    recorded.
803

804
    """
805
    self.attached = False
806
    result = utils.RunCmd(["lvs", "--noheadings", "--separator=,",
807
                           "--units=m", "--nosuffix",
808
                           "-olv_attr,lv_kernel_major,lv_kernel_minor,"
809
                           "vg_extent_size,stripes", self.dev_path])
810
    if result.failed:
811
      logging.error("Can't find LV %s: %s, %s",
812
                    self.dev_path, result.fail_reason, result.output)
813
      return False
814
    # the output can (and will) have multiple lines for multi-segment
815
    # LVs, as the 'stripes' parameter is a segment one, so we take
816
    # only the last entry, which is the one we're interested in; note
817
    # that with LVM2 anyway the 'stripes' value must be constant
818
    # across segments, so this is a no-op actually
819
    out = result.stdout.splitlines()
820
    if not out: # totally empty result? splitlines() returns at least
821
                # one line for any non-empty string
822
      logging.error("Can't parse LVS output, no lines? Got '%s'", str(out))
823
      return False
824
    out = out[-1].strip().rstrip(",")
825
    out = out.split(",")
826
    if len(out) != 5:
827
      logging.error("Can't parse LVS output, len(%s) != 5", str(out))
828
      return False
829

    
830
    status, major, minor, pe_size, stripes = out
831
    if len(status) < 6:
832
      logging.error("lvs lv_attr is not at least 6 characters (%s)", status)
833
      return False
834

    
835
    try:
836
      major = int(major)
837
      minor = int(minor)
838
    except (TypeError, ValueError), err:
839
      logging.error("lvs major/minor cannot be parsed: %s", str(err))
840

    
841
    try:
842
      pe_size = int(float(pe_size))
843
    except (TypeError, ValueError), err:
844
      logging.error("Can't parse vg extent size: %s", err)
845
      return False
846

    
847
    try:
848
      stripes = int(stripes)
849
    except (TypeError, ValueError), err:
850
      logging.error("Can't parse the number of stripes: %s", err)
851
      return False
852

    
853
    self.major = major
854
    self.minor = minor
855
    self.pe_size = pe_size
856
    self.stripe_count = stripes
857
    self._degraded = status[0] == "v" # virtual volume, i.e. has no backing
858
                                      # storage
859
    self.attached = True
860
    return True
861

    
862
  def Assemble(self):
863
    """Assemble the device.
864

865
    We always run `lvchange -ay` on the LV to ensure it's active before
866
    use, as there were cases when xenvg was not active after boot
867
    (also possibly after disk issues).
868

869
    """
870
    result = utils.RunCmd(["lvchange", "-ay", self.dev_path])
871
    if result.failed:
872
      _ThrowError("Can't activate lv %s: %s", self.dev_path, result.output)
873

    
874
  def Shutdown(self):
875
    """Shutdown the device.
876

877
    This is a no-op for the LV device type, as we don't deactivate the
878
    volumes on shutdown.
879

880
    """
881
    pass
882

    
883
  def GetSyncStatus(self):
884
    """Returns the sync status of the device.
885

886
    If this device is a mirroring device, this function returns the
887
    status of the mirror.
888

889
    For logical volumes, sync_percent and estimated_time are always
890
    None (no recovery in progress, as we don't handle the mirrored LV
891
    case). The is_degraded parameter is the inverse of the ldisk
892
    parameter.
893

894
    For the ldisk parameter, we check if the logical volume has the
895
    'virtual' type, which means it's not backed by existing storage
896
    anymore (reads from it return I/O errors). This happens after a
897
    physical disk failure and subsequent 'vgreduce --removemissing' on
898
    the volume group.
899

900
    The status was already read in Attach, so we just return it.
901

902
    @rtype: objects.BlockDevStatus
903

904
    """
905
    if self._degraded:
906
      ldisk_status = constants.LDS_FAULTY
907
    else:
908
      ldisk_status = constants.LDS_OKAY
909

    
910
    return objects.BlockDevStatus(dev_path=self.dev_path,
911
                                  major=self.major,
912
                                  minor=self.minor,
913
                                  sync_percent=None,
914
                                  estimated_time=None,
915
                                  is_degraded=self._degraded,
916
                                  ldisk_status=ldisk_status)
917

    
918
  def Open(self, force=False):
919
    """Make the device ready for I/O.
920

921
    This is a no-op for the LV device type.
922

923
    """
924
    pass
925

    
926
  def Close(self):
927
    """Notifies that the device will no longer be used for I/O.
928

929
    This is a no-op for the LV device type.
930

931
    """
932
    pass
933

    
934
  def Snapshot(self, size):
935
    """Create a snapshot copy of an lvm block device.
936

937
    @returns: tuple (vg, lv)
938

939
    """
940
    snap_name = self._lv_name + ".snap"
941

    
942
    # remove existing snapshot if found
943
    snap = LogicalVolume((self._vg_name, snap_name), None, size, self.params)
944
    _IgnoreError(snap.Remove)
945

    
946
    vg_info = self.GetVGInfo([self._vg_name], False)
947
    if not vg_info:
948
      _ThrowError("Can't compute VG info for vg %s", self._vg_name)
949
    free_size, _, _ = vg_info[0]
950
    if free_size < size:
951
      _ThrowError("Not enough free space: required %s,"
952
                  " available %s", size, free_size)
953

    
954
    _CheckResult(utils.RunCmd(["lvcreate", "-L%dm" % size, "-s",
955
                               "-n%s" % snap_name, self.dev_path]))
956

    
957
    return (self._vg_name, snap_name)
958

    
959
  def _RemoveOldInfo(self):
960
    """Try to remove old tags from the lv.
961

962
    """
963
    result = utils.RunCmd(["lvs", "-o", "tags", "--noheadings", "--nosuffix",
964
                           self.dev_path])
965
    _CheckResult(result)
966

    
967
    raw_tags = result.stdout.strip()
968
    if raw_tags:
969
      for tag in raw_tags.split(","):
970
        _CheckResult(utils.RunCmd(["lvchange", "--deltag",
971
                                   tag.strip(), self.dev_path]))
972

    
973
  def SetInfo(self, text):
974
    """Update metadata with info text.
975

976
    """
977
    BlockDev.SetInfo(self, text)
978

    
979
    self._RemoveOldInfo()
980

    
981
    # Replace invalid characters
982
    text = re.sub("^[^A-Za-z0-9_+.]", "_", text)
983
    text = re.sub("[^-A-Za-z0-9_+.]", "_", text)
984

    
985
    # Only up to 128 characters are allowed
986
    text = text[:128]
987

    
988
    _CheckResult(utils.RunCmd(["lvchange", "--addtag", text, self.dev_path]))
989

    
990
  def Grow(self, amount, dryrun, backingstore):
991
    """Grow the logical volume.
992

993
    """
994
    if not backingstore:
995
      return
996
    if self.pe_size is None or self.stripe_count is None:
997
      if not self.Attach():
998
        _ThrowError("Can't attach to LV during Grow()")
999
    full_stripe_size = self.pe_size * self.stripe_count
1000
    rest = amount % full_stripe_size
1001
    if rest != 0:
1002
      amount += full_stripe_size - rest
1003
    cmd = ["lvextend", "-L", "+%dm" % amount]
1004
    if dryrun:
1005
      cmd.append("--test")
1006
    # we try multiple algorithms since the 'best' ones might not have
1007
    # space available in the right place, but later ones might (since
1008
    # they have less constraints); also note that only recent LVM
1009
    # supports 'cling'
1010
    for alloc_policy in "contiguous", "cling", "normal":
1011
      result = utils.RunCmd(cmd + ["--alloc", alloc_policy, self.dev_path])
1012
      if not result.failed:
1013
        return
1014
    _ThrowError("Can't grow LV %s: %s", self.dev_path, result.output)
1015

    
1016

    
1017
class DRBD8Status(object):
1018
  """A DRBD status representation class.
1019

1020
  Note that this doesn't support unconfigured devices (cs:Unconfigured).
1021

1022
  """
1023
  UNCONF_RE = re.compile(r"\s*[0-9]+:\s*cs:Unconfigured$")
1024
  LINE_RE = re.compile(r"\s*[0-9]+:\s*cs:(\S+)\s+(?:st|ro):([^/]+)/(\S+)"
1025
                       "\s+ds:([^/]+)/(\S+)\s+.*$")
1026
  SYNC_RE = re.compile(r"^.*\ssync'ed:\s*([0-9.]+)%.*"
1027
                       # Due to a bug in drbd in the kernel, introduced in
1028
                       # commit 4b0715f096 (still unfixed as of 2011-08-22)
1029
                       "(?:\s|M)"
1030
                       "finish: ([0-9]+):([0-9]+):([0-9]+)\s.*$")
1031

    
1032
  CS_UNCONFIGURED = "Unconfigured"
1033
  CS_STANDALONE = "StandAlone"
1034
  CS_WFCONNECTION = "WFConnection"
1035
  CS_WFREPORTPARAMS = "WFReportParams"
1036
  CS_CONNECTED = "Connected"
1037
  CS_STARTINGSYNCS = "StartingSyncS"
1038
  CS_STARTINGSYNCT = "StartingSyncT"
1039
  CS_WFBITMAPS = "WFBitMapS"
1040
  CS_WFBITMAPT = "WFBitMapT"
1041
  CS_WFSYNCUUID = "WFSyncUUID"
1042
  CS_SYNCSOURCE = "SyncSource"
1043
  CS_SYNCTARGET = "SyncTarget"
1044
  CS_PAUSEDSYNCS = "PausedSyncS"
1045
  CS_PAUSEDSYNCT = "PausedSyncT"
1046
  CSET_SYNC = compat.UniqueFrozenset([
1047
    CS_WFREPORTPARAMS,
1048
    CS_STARTINGSYNCS,
1049
    CS_STARTINGSYNCT,
1050
    CS_WFBITMAPS,
1051
    CS_WFBITMAPT,
1052
    CS_WFSYNCUUID,
1053
    CS_SYNCSOURCE,
1054
    CS_SYNCTARGET,
1055
    CS_PAUSEDSYNCS,
1056
    CS_PAUSEDSYNCT,
1057
    ])
1058

    
1059
  DS_DISKLESS = "Diskless"
1060
  DS_ATTACHING = "Attaching" # transient state
1061
  DS_FAILED = "Failed" # transient state, next: diskless
1062
  DS_NEGOTIATING = "Negotiating" # transient state
1063
  DS_INCONSISTENT = "Inconsistent" # while syncing or after creation
1064
  DS_OUTDATED = "Outdated"
1065
  DS_DUNKNOWN = "DUnknown" # shown for peer disk when not connected
1066
  DS_CONSISTENT = "Consistent"
1067
  DS_UPTODATE = "UpToDate" # normal state
1068

    
1069
  RO_PRIMARY = "Primary"
1070
  RO_SECONDARY = "Secondary"
1071
  RO_UNKNOWN = "Unknown"
1072

    
1073
  def __init__(self, procline):
1074
    u = self.UNCONF_RE.match(procline)
1075
    if u:
1076
      self.cstatus = self.CS_UNCONFIGURED
1077
      self.lrole = self.rrole = self.ldisk = self.rdisk = None
1078
    else:
1079
      m = self.LINE_RE.match(procline)
1080
      if not m:
1081
        raise errors.BlockDeviceError("Can't parse input data '%s'" % procline)
1082
      self.cstatus = m.group(1)
1083
      self.lrole = m.group(2)
1084
      self.rrole = m.group(3)
1085
      self.ldisk = m.group(4)
1086
      self.rdisk = m.group(5)
1087

    
1088
    # end reading of data from the LINE_RE or UNCONF_RE
1089

    
1090
    self.is_standalone = self.cstatus == self.CS_STANDALONE
1091
    self.is_wfconn = self.cstatus == self.CS_WFCONNECTION
1092
    self.is_connected = self.cstatus == self.CS_CONNECTED
1093
    self.is_primary = self.lrole == self.RO_PRIMARY
1094
    self.is_secondary = self.lrole == self.RO_SECONDARY
1095
    self.peer_primary = self.rrole == self.RO_PRIMARY
1096
    self.peer_secondary = self.rrole == self.RO_SECONDARY
1097
    self.both_primary = self.is_primary and self.peer_primary
1098
    self.both_secondary = self.is_secondary and self.peer_secondary
1099

    
1100
    self.is_diskless = self.ldisk == self.DS_DISKLESS
1101
    self.is_disk_uptodate = self.ldisk == self.DS_UPTODATE
1102

    
1103
    self.is_in_resync = self.cstatus in self.CSET_SYNC
1104
    self.is_in_use = self.cstatus != self.CS_UNCONFIGURED
1105

    
1106
    m = self.SYNC_RE.match(procline)
1107
    if m:
1108
      self.sync_percent = float(m.group(1))
1109
      hours = int(m.group(2))
1110
      minutes = int(m.group(3))
1111
      seconds = int(m.group(4))
1112
      self.est_time = hours * 3600 + minutes * 60 + seconds
1113
    else:
1114
      # we have (in this if branch) no percent information, but if
1115
      # we're resyncing we need to 'fake' a sync percent information,
1116
      # as this is how cmdlib determines if it makes sense to wait for
1117
      # resyncing or not
1118
      if self.is_in_resync:
1119
        self.sync_percent = 0
1120
      else:
1121
        self.sync_percent = None
1122
      self.est_time = None
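
  # Illustrative /proc/drbd lines this class understands (values invented):
  #
  #   " 0: cs:Connected ro:Primary/Secondary ds:UpToDate/UpToDate C r----"
  #     -> is_connected, is_primary, peer_secondary; sync_percent is None
  #   " 1: cs:SyncTarget ro:Secondary/Primary ds:Inconsistent/UpToDate C
  #     r---- ... sync'ed: 25.1% ... finish: 0:05:23 ..." (one joined line)
  #     -> is_in_resync; sync_percent 25.1; est_time 323 seconds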
1123

    
1124

    
1125
class BaseDRBD(BlockDev): # pylint: disable=W0223
1126
  """Base DRBD class.
1127

1128
  This class contains a few bits of common functionality between the
1129
  0.7 and 8.x versions of DRBD.
1130

1131
  """
1132
  _VERSION_RE = re.compile(r"^version: (\d+)\.(\d+)\.(\d+)(?:\.\d+)?"
1133
                           r" \(api:(\d+)/proto:(\d+)(?:-(\d+))?\)")
1134
  _VALID_LINE_RE = re.compile("^ *([0-9]+): cs:([^ ]+).*$")
1135
  _UNUSED_LINE_RE = re.compile("^ *([0-9]+): cs:Unconfigured$")
1136

    
1137
  _DRBD_MAJOR = 147
1138
  _ST_UNCONFIGURED = "Unconfigured"
1139
  _ST_WFCONNECTION = "WFConnection"
1140
  _ST_CONNECTED = "Connected"
1141

    
1142
  _STATUS_FILE = constants.DRBD_STATUS_FILE
1143
  _USERMODE_HELPER_FILE = "/sys/module/drbd/parameters/usermode_helper"
1144

    
1145
  @staticmethod
1146
  def _GetProcData(filename=_STATUS_FILE):
1147
    """Return data from /proc/drbd.
1148

1149
    """
1150
    try:
1151
      data = utils.ReadFile(filename).splitlines()
1152
    except EnvironmentError, err:
1153
      if err.errno == errno.ENOENT:
1154
        _ThrowError("The file %s cannot be opened, check if the module"
1155
                    " is loaded (%s)", filename, str(err))
1156
      else:
1157
        _ThrowError("Can't read the DRBD proc file %s: %s", filename, str(err))
1158
    if not data:
1159
      _ThrowError("Can't read any data from %s", filename)
1160
    return data
1161

    
1162
  @classmethod
1163
  def _MassageProcData(cls, data):
1164
    """Transform the output of _GetProdData into a nicer form.
1165

1166
    @return: a dictionary of minor: joined lines from /proc/drbd
1167
        for that minor
1168

1169
    """
1170
    results = {}
1171
    old_minor = old_line = None
1172
    for line in data:
1173
      if not line: # completely empty lines, as can be returned by drbd8.0+
1174
        continue
1175
      lresult = cls._VALID_LINE_RE.match(line)
1176
      if lresult is not None:
1177
        if old_minor is not None:
1178
          results[old_minor] = old_line
1179
        old_minor = int(lresult.group(1))
1180
        old_line = line
1181
      else:
1182
        if old_minor is not None:
1183
          old_line += " " + line.strip()
1184
    # add last line
1185
    if old_minor is not None:
1186
      results[old_minor] = old_line
1187
    return results
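
  # Example of the massaging (input is invented): the /proc/drbd lines
  #
  #   " 0: cs:Connected ro:Primary/Secondary ds:UpToDate/UpToDate C r----"
  #   "    ns:123 nr:0 dw:123 dr:80 al:0 bm:0 lo:0 pe:0 ua:0 ap:0"
  #
  # are folded into a single entry {0: "... r---- ns:123 nr:0 ..."}, i.e.
  # continuation lines are appended to the line of the minor they belong to.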
1188

    
1189
  @classmethod
1190
  def _GetVersion(cls, proc_data):
1191
    """Return the DRBD version.
1192

1193
    This will return a dict with keys:
1194
      - k_major
1195
      - k_minor
1196
      - k_point
1197
      - api
1198
      - proto
1199
      - proto2 (only on drbd > 8.2.X)
1200

1201
    """
1202
    first_line = proc_data[0].strip()
1203
    version = cls._VERSION_RE.match(first_line)
1204
    if not version:
1205
      raise errors.BlockDeviceError("Can't parse DRBD version from '%s'" %
1206
                                    first_line)
1207

    
1208
    values = version.groups()
1209
    retval = {
1210
      "k_major": int(values[0]),
1211
      "k_minor": int(values[1]),
1212
      "k_point": int(values[2]),
1213
      "api": int(values[3]),
1214
      "proto": int(values[4]),
1215
      }
1216
    if values[5] is not None:
1217
      retval["proto2"] = values[5]
1218

    
1219
    return retval
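
  # Example: a first /proc/drbd line of
  #
  #   "version: 8.3.11 (api:88/proto:86-96)"
  #
  # parses into {"k_major": 8, "k_minor": 3, "k_point": 11, "api": 88,
  # "proto": 86, "proto2": "96"} (proto2 is kept as a string, as above).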
1220

    
1221
  @staticmethod
1222
  def GetUsermodeHelper(filename=_USERMODE_HELPER_FILE):
1223
    """Returns DRBD usermode_helper currently set.
1224

1225
    """
1226
    try:
1227
      helper = utils.ReadFile(filename).splitlines()[0]
1228
    except EnvironmentError, err:
1229
      if err.errno == errno.ENOENT:
1230
        _ThrowError("The file %s cannot be opened, check if the module"
1231
                    " is loaded (%s)", filename, str(err))
1232
      else:
1233
        _ThrowError("Can't read DRBD helper file %s: %s", filename, str(err))
1234
    if not helper:
1235
      _ThrowError("Can't read any data from %s", filename)
1236
    return helper
1237

    
1238
  @staticmethod
1239
  def _DevPath(minor):
1240
    """Return the path to a drbd device for a given minor.
1241

1242
    """
1243
    return "/dev/drbd%d" % minor
1244

    
1245
  @classmethod
1246
  def GetUsedDevs(cls):
1247
    """Compute the list of used DRBD devices.
1248

1249
    """
1250
    data = cls._GetProcData()
1251

    
1252
    used_devs = {}
1253
    for line in data:
1254
      match = cls._VALID_LINE_RE.match(line)
1255
      if not match:
1256
        continue
1257
      minor = int(match.group(1))
1258
      state = match.group(2)
1259
      if state == cls._ST_UNCONFIGURED:
1260
        continue
1261
      used_devs[minor] = state, line
1262

    
1263
    return used_devs
1264

    
1265
  def _SetFromMinor(self, minor):
1266
    """Set our parameters based on the given minor.
1267

1268
    This sets our minor variable and our dev_path.
1269

1270
    """
1271
    if minor is None:
1272
      self.minor = self.dev_path = None
1273
      self.attached = False
1274
    else:
1275
      self.minor = minor
1276
      self.dev_path = self._DevPath(minor)
1277
      self.attached = True
1278

    
1279
  @staticmethod
1280
  def _CheckMetaSize(meta_device):
1281
    """Check if the given meta device looks like a valid one.
1282

1283
    This currently only checks the size, which must be around
1284
    128MiB.
1285

1286
    """
1287
    result = utils.RunCmd(["blockdev", "--getsize", meta_device])
1288
    if result.failed:
1289
      _ThrowError("Failed to get device size: %s - %s",
1290
                  result.fail_reason, result.output)
1291
    try:
1292
      sectors = int(result.stdout)
1293
    except (TypeError, ValueError):
1294
      _ThrowError("Invalid output from blockdev: '%s'", result.stdout)
1295
    num_bytes = sectors * 512
1296
    if num_bytes < 128 * 1024 * 1024: # less than 128MiB
1297
      _ThrowError("Meta device too small (%.2fMib)", (num_bytes / 1024 / 1024))
1298
    # the maximum *valid* size of the meta device when living on top
1299
    # of LVM is hard to compute: it depends on the number of stripes
1300
    # and the PE size; e.g. a 2-stripe, 64MB PE will result in a 128MB
1301
    # (normal size), but an eight-stripe 128MB PE will result in a 1GB
1302
    # size meta device; as such, we restrict it to 1GB (a little bit
1303
    # too generous, but making assumptions about PE size is hard)
1304
    if num_bytes > 1024 * 1024 * 1024:
1305
      _ThrowError("Meta device too big (%.2fMiB)", (num_bytes / 1024 / 1024))
1306

    
1307
  def Rename(self, new_id):
1308
    """Rename a device.
1309

1310
    This is not supported for drbd devices.
1311

1312
    """
1313
    raise errors.ProgrammerError("Can't rename a drbd device")
1314

    
1315

    
1316
class DRBD8(BaseDRBD):
1317
  """DRBD v8.x block device.
1318

1319
  This implements the local host part of the DRBD device, i.e. it
1320
  doesn't do anything to the supposed peer. If you need a fully
1321
  connected DRBD pair, you need to use this class on both hosts.
1322

1323
  The unique_id for the drbd device is a (local_ip, local_port,
1324
  remote_ip, remote_port, local_minor, secret) tuple, and it must have
1325
  two children: the data device and the meta_device. The meta device
1326
  is checked for valid size and is zeroed on create.
1327

1328
  """
1329
  _MAX_MINORS = 255
1330
  _PARSE_SHOW = None
1331

    
1332
  # timeout constants
1333
  _NET_RECONFIG_TIMEOUT = 60
1334

    
1335
  # command line options for barriers
1336
  _DISABLE_DISK_OPTION = "--no-disk-barrier"  # -a
1337
  _DISABLE_DRAIN_OPTION = "--no-disk-drain"   # -D
1338
  _DISABLE_FLUSH_OPTION = "--no-disk-flushes" # -i
1339
  _DISABLE_META_FLUSH_OPTION = "--no-md-flushes"  # -m
1340

    
1341
  def __init__(self, unique_id, children, size, params):
1342
    if children and children.count(None) > 0:
1343
      children = []
1344
    if len(children) not in (0, 2):
1345
      raise ValueError("Invalid configuration data %s" % str(children))
1346
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 6:
1347
      raise ValueError("Invalid configuration data %s" % str(unique_id))
1348
    (self._lhost, self._lport,
1349
     self._rhost, self._rport,
1350
     self._aminor, self._secret) = unique_id
1351
    if children:
1352
      if not _CanReadDevice(children[1].dev_path):
1353
        logging.info("drbd%s: Ignoring unreadable meta device", self._aminor)
1354
        children = []
1355
    super(DRBD8, self).__init__(unique_id, children, size, params)
1356
    self.major = self._DRBD_MAJOR
1357
    version = self._GetVersion(self._GetProcData())
1358
    if version["k_major"] != 8:
1359
      _ThrowError("Mismatch in DRBD kernel version and requested ganeti"
1360
                  " usage: kernel is %s.%s, ganeti wants 8.x",
1361
                  version["k_major"], version["k_minor"])
1362

    
1363
    if (self._lhost is not None and self._lhost == self._rhost and
1364
        self._lport == self._rport):
1365
      raise ValueError("Invalid configuration data, same local/remote %s" %
1366
                       (unique_id,))
1367
    self.Attach()
1368

    
1369
  @classmethod
1370
  def _InitMeta(cls, minor, dev_path):
1371
    """Initialize a meta device.
1372

1373
    This will not work if the given minor is in use.
1374

1375
    """
1376
    # Zero the metadata first, in order to make sure drbdmeta doesn't
1377
    # try to auto-detect existing filesystems or similar (see
1378
    # http://code.google.com/p/ganeti/issues/detail?id=182); we only
1379
    # care about the first 128MB of data in the device, even though it
1380
    # can be bigger
1381
    result = utils.RunCmd([constants.DD_CMD,
1382
                           "if=/dev/zero", "of=%s" % dev_path,
1383
                           "bs=1048576", "count=128", "oflag=direct"])
1384
    if result.failed:
1385
      _ThrowError("Can't wipe the meta device: %s", result.output)
1386

    
1387
    result = utils.RunCmd(["drbdmeta", "--force", cls._DevPath(minor),
1388
                           "v08", dev_path, "0", "create-md"])
1389
    if result.failed:
1390
      _ThrowError("Can't initialize meta device: %s", result.output)
1391

    
1392
  @classmethod
1393
  def _FindUnusedMinor(cls):
1394
    """Find an unused DRBD device.
1395

1396
    This is specific to 8.x as the minors are allocated dynamically,
1397
    so non-existing numbers up to a max minor count are actually free.
1398

1399
    """
1400
    data = cls._GetProcData()
1401

    
1402
    highest = None
1403
    for line in data:
1404
      match = cls._UNUSED_LINE_RE.match(line)
1405
      if match:
1406
        return int(match.group(1))
1407
      match = cls._VALID_LINE_RE.match(line)
1408
      if match:
1409
        minor = int(match.group(1))
1410
        highest = max(highest, minor)
1411
    if highest is None: # there are no minors in use at all
1412
      return 0
1413
    if highest >= cls._MAX_MINORS:
1414
      logging.error("Error: no free drbd minors!")
1415
      raise errors.BlockDeviceError("Can't find a free DRBD minor")
1416
    return highest + 1
1417

    
1418
  @classmethod
1419
  def _GetShowParser(cls):
1420
    """Return a parser for `drbd show` output.
1421

1422
    This will either create or return an already-created parser for the
1423
    output of the command `drbdsetup show`.
1424

1425
    """
1426
    if cls._PARSE_SHOW is not None:
1427
      return cls._PARSE_SHOW
1428

    
1429
    # pyparsing setup
1430
    lbrace = pyp.Literal("{").suppress()
1431
    rbrace = pyp.Literal("}").suppress()
1432
    lbracket = pyp.Literal("[").suppress()
1433
    rbracket = pyp.Literal("]").suppress()
1434
    semi = pyp.Literal(";").suppress()
1435
    colon = pyp.Literal(":").suppress()
1436
    # this also converts the value to an int
1437
    number = pyp.Word(pyp.nums).setParseAction(lambda s, l, t: int(t[0]))
1438

    
1439
    comment = pyp.Literal("#") + pyp.Optional(pyp.restOfLine)
1440
    defa = pyp.Literal("_is_default").suppress()
1441
    dbl_quote = pyp.Literal('"').suppress()
1442

    
1443
    keyword = pyp.Word(pyp.alphanums + "-")
1444

    
1445
    # value types
1446
    value = pyp.Word(pyp.alphanums + "_-/.:")
1447
    quoted = dbl_quote + pyp.CharsNotIn('"') + dbl_quote
1448
    ipv4_addr = (pyp.Optional(pyp.Literal("ipv4")).suppress() +
1449
                 pyp.Word(pyp.nums + ".") + colon + number)
1450
    ipv6_addr = (pyp.Optional(pyp.Literal("ipv6")).suppress() +
1451
                 pyp.Optional(lbracket) + pyp.Word(pyp.hexnums + ":") +
1452
                 pyp.Optional(rbracket) + colon + number)
1453
    # meta device, extended syntax
1454
    meta_value = ((value ^ quoted) + lbracket + number + rbracket)
1455
    # device name, extended syntax
1456
    device_value = pyp.Literal("minor").suppress() + number
1457

    
1458
    # a statement
1459
    stmt = (~rbrace + keyword + ~lbrace +
1460
            pyp.Optional(ipv4_addr ^ ipv6_addr ^ value ^ quoted ^ meta_value ^
1461
                         device_value) +
1462
            pyp.Optional(defa) + semi +
1463
            pyp.Optional(pyp.restOfLine).suppress())
1464

    
1465
    # an entire section
1466
    section_name = pyp.Word(pyp.alphas + "_")
1467
    section = section_name + lbrace + pyp.ZeroOrMore(pyp.Group(stmt)) + rbrace
1468

    
1469
    bnf = pyp.ZeroOrMore(pyp.Group(section ^ stmt))
1470
    bnf.ignore(comment)
1471

    
1472
    cls._PARSE_SHOW = bnf
1473

    
1474
    return bnf
1475

    
1476
  @classmethod
1477
  def _GetShowData(cls, minor):
1478
    """Return the `drbdsetup show` data for a minor.
1479

1480
    """
1481
    result = utils.RunCmd(["drbdsetup", cls._DevPath(minor), "show"])
1482
    if result.failed:
1483
      logging.error("Can't display the drbd config: %s - %s",
1484
                    result.fail_reason, result.output)
1485
      return None
1486
    return result.stdout
1487

    
1488
  @classmethod
1489
  def _GetDevInfo(cls, out):
1490
    """Parse details about a given DRBD minor.
1491

1492
    This return, if available, the local backing device (as a path)
1493
    and the local and remote (ip, port) information from a string
1494
    containing the output of the `drbdsetup show` command as returned
1495
    by _GetShowData.
1496

1497
    """
1498
    data = {}
1499
    if not out:
1500
      return data
1501

    
1502
    bnf = cls._GetShowParser()
1503
    # run pyparse
1504

    
1505
    try:
1506
      results = bnf.parseString(out)
1507
    except pyp.ParseException, err:
1508
      _ThrowError("Can't parse drbdsetup show output: %s", str(err))
1509

    
1510
    # and massage the results into our desired format
1511
    for section in results:
1512
      sname = section[0]
1513
      if sname == "_this_host":
1514
        for lst in section[1:]:
1515
          if lst[0] == "disk":
1516
            data["local_dev"] = lst[1]
1517
          elif lst[0] == "meta-disk":
1518
            data["meta_dev"] = lst[1]
1519
            data["meta_index"] = lst[2]
1520
          elif lst[0] == "address":
1521
            data["local_addr"] = tuple(lst[1:])
1522
      elif sname == "_remote_host":
1523
        for lst in section[1:]:
1524
          if lst[0] == "address":
1525
            data["remote_addr"] = tuple(lst[1:])
1526
    return data
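  # For output shaped like the assumed example sketched above _GetShowParser,
  # this method would return roughly (values illustrative only):
  #   {"local_dev": "/dev/xenvg/inst1.data",
  #    "meta_dev": "/dev/xenvg/inst1.meta", "meta_index": 0,
  #    "local_addr": ("192.0.2.1", 11000),
  #    "remote_addr": ("192.0.2.2", 11000)}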
1527

    
1528
  def _MatchesLocal(self, info):
1529
    """Test if our local config matches with an existing device.
1530

1531
    The parameter should be as returned from `_GetDevInfo()`. This
1532
    method tests if our local backing device is the same as the one in
1533
    the info parameter, in effect testing if we look like the given
1534
    device.
1535

1536
    """
1537
    if self._children:
1538
      backend, meta = self._children
1539
    else:
1540
      backend = meta = None
1541

    
1542
    if backend is not None:
1543
      retval = ("local_dev" in info and info["local_dev"] == backend.dev_path)
1544
    else:
1545
      retval = ("local_dev" not in info)
1546

    
1547
    if meta is not None:
1548
      retval = retval and ("meta_dev" in info and
1549
                           info["meta_dev"] == meta.dev_path)
1550
      retval = retval and ("meta_index" in info and
1551
                           info["meta_index"] == 0)
1552
    else:
1553
      retval = retval and ("meta_dev" not in info and
1554
                           "meta_index" not in info)
1555
    return retval
1556

    
1557
  def _MatchesNet(self, info):
1558
    """Test if our network config matches with an existing device.
1559

1560
    The parameter should be as returned from `_GetDevInfo()`. This
1561
    method tests if our network configuration is the same as the one
1562
    in the info parameter, in effect testing if we look like the given
1563
    device.
1564

1565
    """
1566
    if (((self._lhost is None and not ("local_addr" in info)) and
1567
         (self._rhost is None and not ("remote_addr" in info)))):
1568
      return True
1569

    
1570
    if self._lhost is None:
1571
      return False
1572

    
1573
    if not ("local_addr" in info and
1574
            "remote_addr" in info):
1575
      return False
1576

    
1577
    retval = (info["local_addr"] == (self._lhost, self._lport))
1578
    retval = (retval and
1579
              info["remote_addr"] == (self._rhost, self._rport))
1580
    return retval
1581

    
1582
  def _AssembleLocal(self, minor, backend, meta, size):
1583
    """Configure the local part of a DRBD device.
1584

1585
    """
1586
    args = ["drbdsetup", self._DevPath(minor), "disk",
1587
            backend, meta, "0",
1588
            "-e", "detach",
1589
            "--create-device"]
1590
    if size:
1591
      args.extend(["-d", "%sm" % size])
1592

    
1593
    version = self._GetVersion(self._GetProcData())
1594
    vmaj = version["k_major"]
1595
    vmin = version["k_minor"]
1596
    vrel = version["k_point"]
1597

    
1598
    barrier_args = \
1599
      self._ComputeDiskBarrierArgs(vmaj, vmin, vrel,
1600
                                   self.params[constants.LDP_BARRIERS],
1601
                                   self.params[constants.LDP_NO_META_FLUSH])
1602
    args.extend(barrier_args)
1603

    
1604
    if self.params[constants.LDP_DISK_CUSTOM]:
1605
      args.extend(shlex.split(self.params[constants.LDP_DISK_CUSTOM]))
1606

    
1607
    result = utils.RunCmd(args)
1608
    if result.failed:
1609
      _ThrowError("drbd%d: can't attach local disk: %s", minor, result.output)
1610

    
1611
  @classmethod
1612
  def _ComputeDiskBarrierArgs(cls, vmaj, vmin, vrel, disabled_barriers,
1613
                              disable_meta_flush):
1614
    """Compute the DRBD command line parameters for disk barriers
1615

1616
    Returns a list of the disk barrier parameters as requested via the
1617
    disabled_barriers and disable_meta_flush arguments, and according to the
1618
    supported ones in the DRBD version vmaj.vmin.vrel
1619

1620
    If the desired option is unsupported, raises errors.BlockDeviceError.
1621

1622
    """
1623
    disabled_barriers_set = frozenset(disabled_barriers)
1624
    if disabled_barriers_set not in constants.DRBD_VALID_BARRIER_OPT:
1625
      raise errors.BlockDeviceError("%s is not a valid option set for DRBD"
1626
                                    " barriers" % disabled_barriers)
1627

    
1628
    args = []
1629

    
1630
    # The following code assumes DRBD 8.x, with x < 4 and x != 1 (DRBD 8.1.x
1631
    # does not exist)
1632
    if vmaj != 8 or vmin not in (0, 2, 3):
1633
      raise errors.BlockDeviceError("Unsupported DRBD version: %d.%d.%d" %
1634
                                    (vmaj, vmin, vrel))
1635

    
1636
    def _AppendOrRaise(option, min_version):
1637
      """Helper for DRBD options"""
1638
      if min_version is not None and vrel >= min_version:
1639
        args.append(option)
1640
      else:
1641
        raise errors.BlockDeviceError("Could not use the option %s as the"
1642
                                      " DRBD version %d.%d.%d does not support"
1643
                                      " it." % (option, vmaj, vmin, vrel))
1644

    
1645
    # the minimum version for each feature is encoded via pairs of (minor
1646
    # version -> x) where x is version in which support for the option was
1647
    # introduced.
1648
    meta_flush_supported = disk_flush_supported = {
1649
      0: 12,
1650
      2: 7,
1651
      3: 0,
1652
      }
1653

    
1654
    disk_drain_supported = {
1655
      2: 7,
1656
      3: 0,
1657
      }
1658

    
1659
    disk_barriers_supported = {
1660
      3: 0,
1661
      }
1662

    
1663
    # meta flushes
1664
    if disable_meta_flush:
1665
      _AppendOrRaise(cls._DISABLE_META_FLUSH_OPTION,
1666
                     meta_flush_supported.get(vmin, None))
1667

    
1668
    # disk flushes
1669
    if constants.DRBD_B_DISK_FLUSH in disabled_barriers_set:
1670
      _AppendOrRaise(cls._DISABLE_FLUSH_OPTION,
1671
                     disk_flush_supported.get(vmin, None))
1672

    
1673
    # disk drain
1674
    if constants.DRBD_B_DISK_DRAIN in disabled_barriers_set:
1675
      _AppendOrRaise(cls._DISABLE_DRAIN_OPTION,
1676
                     disk_drain_supported.get(vmin, None))
1677

    
1678
    # disk barriers
1679
    if constants.DRBD_B_DISK_BARRIERS in disabled_barriers_set:
1680
      _AppendOrRaise(cls._DISABLE_DISK_OPTION,
1681
                     disk_barriers_supported.get(vmin, None))
1682

    
1683
    return args
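  # Rough usage sketch (values assumed, and assuming "b" is the shorthand
  # for constants.DRBD_B_DISK_BARRIERS): on DRBD 8.3.x,
  #   _ComputeDiskBarrierArgs(8, 3, 0, "b", True)
  # would return [cls._DISABLE_META_FLUSH_OPTION, cls._DISABLE_DISK_OPTION],
  # while an unsupported combination or version raises BlockDeviceError.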
1684

    
1685
  def _AssembleNet(self, minor, net_info, protocol,
1686
                   dual_pri=False, hmac=None, secret=None):
1687
    """Configure the network part of the device.
1688

1689
    """
1690
    lhost, lport, rhost, rport = net_info
1691
    if None in net_info:
1692
      # we don't want network connection and actually want to make
1693
      # sure its shutdown
1694
      self._ShutdownNet(minor)
1695
      return
1696

    
1697
    # Workaround for a race condition. When DRBD is doing its dance to
1698
    # establish a connection with its peer, it also sends the
1699
    # synchronization speed over the wire. In some cases setting the
1700
    # sync speed only after setting up both sides can race with DRBD
1701
    # connecting, hence we set it here before telling DRBD anything
1702
    # about its peer.
1703
    sync_errors = self._SetMinorSyncParams(minor, self.params)
1704
    if sync_errors:
1705
      _ThrowError("drbd%d: can't set the synchronization parameters: %s" %
1706
                  (minor, utils.CommaJoin(sync_errors)))
1707

    
1708
    if netutils.IP6Address.IsValid(lhost):
1709
      if not netutils.IP6Address.IsValid(rhost):
1710
        _ThrowError("drbd%d: can't connect ip %s to ip %s" %
1711
                    (minor, lhost, rhost))
1712
      family = "ipv6"
1713
    elif netutils.IP4Address.IsValid(lhost):
1714
      if not netutils.IP4Address.IsValid(rhost):
1715
        _ThrowError("drbd%d: can't connect ip %s to ip %s" %
1716
                    (minor, lhost, rhost))
1717
      family = "ipv4"
1718
    else:
1719
      _ThrowError("drbd%d: Invalid ip %s" % (minor, lhost))
1720

    
1721
    args = ["drbdsetup", self._DevPath(minor), "net",
1722
            "%s:%s:%s" % (family, lhost, lport),
1723
            "%s:%s:%s" % (family, rhost, rport), protocol,
1724
            "-A", "discard-zero-changes",
1725
            "-B", "consensus",
1726
            "--create-device",
1727
            ]
1728
    if dual_pri:
1729
      args.append("-m")
1730
    if hmac and secret:
1731
      args.extend(["-a", hmac, "-x", secret])
1732

    
1733
    if self.params[constants.LDP_NET_CUSTOM]:
1734
      args.extend(shlex.split(self.params[constants.LDP_NET_CUSTOM]))
1735

    
1736
    result = utils.RunCmd(args)
1737
    if result.failed:
1738
      _ThrowError("drbd%d: can't setup network: %s - %s",
1739
                  minor, result.fail_reason, result.output)
1740

    
1741
    def _CheckNetworkConfig():
1742
      info = self._GetDevInfo(self._GetShowData(minor))
1743
      if not "local_addr" in info or not "remote_addr" in info:
1744
        raise utils.RetryAgain()
1745

    
1746
      if (info["local_addr"] != (lhost, lport) or
1747
          info["remote_addr"] != (rhost, rport)):
1748
        raise utils.RetryAgain()
1749

    
1750
    try:
1751
      utils.Retry(_CheckNetworkConfig, 1.0, 10.0)
1752
    except utils.RetryTimeout:
1753
      _ThrowError("drbd%d: timeout while configuring network", minor)
1754

    
1755
  def AddChildren(self, devices):
1756
    """Add a disk to the DRBD device.
1757

1758
    """
1759
    if self.minor is None:
1760
      _ThrowError("drbd%d: can't attach to dbrd8 during AddChildren",
1761
                  self._aminor)
1762
    if len(devices) != 2:
1763
      _ThrowError("drbd%d: need two devices for AddChildren", self.minor)
1764
    info = self._GetDevInfo(self._GetShowData(self.minor))
1765
    if "local_dev" in info:
1766
      _ThrowError("drbd%d: already attached to a local disk", self.minor)
1767
    backend, meta = devices
1768
    if backend.dev_path is None or meta.dev_path is None:
1769
      _ThrowError("drbd%d: children not ready during AddChildren", self.minor)
1770
    backend.Open()
1771
    meta.Open()
1772
    self._CheckMetaSize(meta.dev_path)
1773
    self._InitMeta(self._FindUnusedMinor(), meta.dev_path)
1774

    
1775
    self._AssembleLocal(self.minor, backend.dev_path, meta.dev_path, self.size)
1776
    self._children = devices
1777

    
1778
  def RemoveChildren(self, devices):
1779
    """Detach the drbd device from local storage.
1780

1781
    """
1782
    if self.minor is None:
1783
      _ThrowError("drbd%d: can't attach to drbd8 during RemoveChildren",
1784
                  self._aminor)
1785
    # early return if we don't actually have backing storage
1786
    info = self._GetDevInfo(self._GetShowData(self.minor))
1787
    if "local_dev" not in info:
1788
      return
1789
    if len(self._children) != 2:
1790
      _ThrowError("drbd%d: we don't have two children: %s", self.minor,
1791
                  self._children)
1792
    if self._children.count(None) == 2: # we don't actually have children :)
1793
      logging.warning("drbd%d: requested detach while detached", self.minor)
1794
      return
1795
    if len(devices) != 2:
1796
      _ThrowError("drbd%d: we need two children in RemoveChildren", self.minor)
1797
    for child, dev in zip(self._children, devices):
1798
      if dev != child.dev_path:
1799
        _ThrowError("drbd%d: mismatch in local storage (%s != %s) in"
1800
                    " RemoveChildren", self.minor, dev, child.dev_path)
1801

    
1802
    self._ShutdownLocal(self.minor)
1803
    self._children = []
1804

    
1805
  @classmethod
1806
  def _SetMinorSyncParams(cls, minor, params):
1807
    """Set the parameters of the DRBD syncer.
1808

1809
    This is the low-level implementation.
1810

1811
    @type minor: int
1812
    @param minor: the drbd minor whose settings we change
1813
    @type params: dict
1814
    @param params: LD level disk parameters related to the synchronization
1815
    @rtype: list
1816
    @return: a list of error messages
1817

1818
    """
1819

    
1820
    args = ["drbdsetup", cls._DevPath(minor), "syncer"]
1821
    if params[constants.LDP_DYNAMIC_RESYNC]:
1822
      version = cls._GetVersion(cls._GetProcData())
1823
      vmin = version["k_minor"]
1824
      vrel = version["k_point"]
1825

    
1826
      # By definition we are using 8.x, so just check the rest of the version
1827
      # number
1828
      if vmin != 3 or vrel < 9:
1829
        msg = ("The current DRBD version (8.%d.%d) does not support the "
1830
               "dynamic resync speed controller" % (vmin, vrel))
1831
        logging.error(msg)
1832
        return [msg]
1833

    
1834
      if params[constants.LDP_PLAN_AHEAD] == 0:
1835
        msg = ("A value of 0 for c-plan-ahead disables the dynamic sync speed"
1836
               " controller at DRBD level. If you want to disable it, please"
1837
               " set the dynamic-resync disk parameter to False.")
1838
        logging.error(msg)
1839
        return [msg]
1840

    
1841
      # add the c-* parameters to args
1842
      args.extend(["--c-plan-ahead", params[constants.LDP_PLAN_AHEAD],
1843
                   "--c-fill-target", params[constants.LDP_FILL_TARGET],
1844
                   "--c-delay-target", params[constants.LDP_DELAY_TARGET],
1845
                   "--c-max-rate", params[constants.LDP_MAX_RATE],
1846
                   "--c-min-rate", params[constants.LDP_MIN_RATE],
1847
                   ])
1848

    
1849
    else:
1850
      args.extend(["-r", "%d" % params[constants.LDP_RESYNC_RATE]])
1851

    
1852
    args.append("--create-device")
1853
    result = utils.RunCmd(args)
1854
    if result.failed:
1855
      msg = ("Can't change syncer rate: %s - %s" %
1856
             (result.fail_reason, result.output))
1857
      logging.error(msg)
1858
      return [msg]
1859

    
1860
    return []
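  # Illustrative command lines this builds (minor and parameter values are
  # assumed, not taken from a real cluster): with the dynamic resync
  # controller enabled on DRBD >= 8.3.9, something like
  #   drbdsetup /dev/drbd0 syncer --c-plan-ahead 20 --c-fill-target 0
  #     --c-delay-target 1 --c-max-rate 61440 --c-min-rate 4096 --create-device
  # and with the static syncer rate only
  #   drbdsetup /dev/drbd0 syncer -r 61440 --create-device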
1861

    
1862
  def SetSyncParams(self, params):
1863
    """Set the synchronization parameters of the DRBD syncer.
1864

1865
    @type params: dict
1866
    @param params: LD level disk parameters related to the synchronization
1867
    @rtype: list
1868
    @return: a list of error messages, emitted both by the current node and by
1869
    children. An empty list means no errors
1870

1871
    """
1872
    if self.minor is None:
1873
      err = "Not attached during SetSyncParams"
1874
      logging.info(err)
1875
      return [err]
1876

    
1877
    children_result = super(DRBD8, self).SetSyncParams(params)
1878
    children_result.extend(self._SetMinorSyncParams(self.minor, params))
1879
    return children_result
1880

    
1881
  def PauseResumeSync(self, pause):
1882
    """Pauses or resumes the sync of a DRBD device.
1883

1884
    @param pause: Whether to pause or resume
1885
    @return: the success of the operation
1886

1887
    """
1888
    if self.minor is None:
1889
      logging.info("Not attached during PauseSync")
1890
      return False
1891

    
1892
    children_result = super(DRBD8, self).PauseResumeSync(pause)
1893

    
1894
    if pause:
1895
      cmd = "pause-sync"
1896
    else:
1897
      cmd = "resume-sync"
1898

    
1899
    result = utils.RunCmd(["drbdsetup", self.dev_path, cmd])
1900
    if result.failed:
1901
      logging.error("Can't %s: %s - %s", cmd,
1902
                    result.fail_reason, result.output)
1903
    return not result.failed and children_result
1904

    
1905
  def GetProcStatus(self):
1906
    """Return device data from /proc.
1907

1908
    """
1909
    if self.minor is None:
1910
      _ThrowError("drbd%d: GetStats() called while not attached", self._aminor)
1911
    proc_info = self._MassageProcData(self._GetProcData())
1912
    if self.minor not in proc_info:
1913
      _ThrowError("drbd%d: can't find myself in /proc", self.minor)
1914
    return DRBD8Status(proc_info[self.minor])
1915

    
1916
  def GetSyncStatus(self):
1917
    """Returns the sync status of the device.
1918

1919

1920
    If sync_percent is None, it means all is ok
1921
    If estimated_time is None, it means we can't estimate
1922
    the time needed, otherwise it's the time left in seconds.
1923

1924

1925
    We set the is_degraded parameter to True on two conditions:
1926
    network not connected or local disk missing.
1927

1928
    We compute the ldisk parameter based on whether we have a local
1929
    disk or not.
1930

1931
    @rtype: objects.BlockDevStatus
1932

1933
    """
1934
    if self.minor is None and not self.Attach():
1935
      _ThrowError("drbd%d: can't Attach() in GetSyncStatus", self._aminor)
1936

    
1937
    stats = self.GetProcStatus()
1938
    is_degraded = not stats.is_connected or not stats.is_disk_uptodate
1939

    
1940
    if stats.is_disk_uptodate:
1941
      ldisk_status = constants.LDS_OKAY
1942
    elif stats.is_diskless:
1943
      ldisk_status = constants.LDS_FAULTY
1944
    else:
1945
      ldisk_status = constants.LDS_UNKNOWN
1946

    
1947
    return objects.BlockDevStatus(dev_path=self.dev_path,
1948
                                  major=self.major,
1949
                                  minor=self.minor,
1950
                                  sync_percent=stats.sync_percent,
1951
                                  estimated_time=stats.est_time,
1952
                                  is_degraded=is_degraded,
1953
                                  ldisk_status=ldisk_status)
1954

    
1955
  def Open(self, force=False):
1956
    """Make the local state primary.
1957

1958
    If the 'force' parameter is given, the '-o' option is passed to
1959
    drbdsetup. Since this is a potentially dangerous operation, the
1960
    force flag should be only given after creation, when it actually
1961
    is mandatory.
1962

1963
    """
1964
    if self.minor is None and not self.Attach():
1965
      logging.error("DRBD cannot attach to a device during open")
1966
      return False
1967
    cmd = ["drbdsetup", self.dev_path, "primary"]
1968
    if force:
1969
      cmd.append("-o")
1970
    result = utils.RunCmd(cmd)
1971
    if result.failed:
1972
      _ThrowError("drbd%d: can't make drbd device primary: %s", self.minor,
1973
                  result.output)
1974

    
1975
  def Close(self):
1976
    """Make the local state secondary.
1977

1978
    This will, of course, fail if the device is in use.
1979

1980
    """
1981
    if self.minor is None and not self.Attach():
1982
      _ThrowError("drbd%d: can't Attach() in Close()", self._aminor)
1983
    result = utils.RunCmd(["drbdsetup", self.dev_path, "secondary"])
1984
    if result.failed:
1985
      _ThrowError("drbd%d: can't switch drbd device to secondary: %s",
1986
                  self.minor, result.output)
1987

    
1988
  def DisconnectNet(self):
1989
    """Removes network configuration.
1990

1991
    This method shuts down the network side of the device.
1992

1993
    The method will wait up to a hardcoded timeout for the device to
1994
    go into standalone after the 'disconnect' command before
1995
    re-configuring it, as sometimes it takes a while for the
1996
    disconnect to actually propagate and thus we might issue a 'net'
1997
    command while the device is still connected. If the device is
1998
    still attached to the network when we time out, we raise an
1999
    exception.
2000

2001
    """
2002
    if self.minor is None:
2003
      _ThrowError("drbd%d: disk not attached in re-attach net", self._aminor)
2004

    
2005
    if None in (self._lhost, self._lport, self._rhost, self._rport):
2006
      _ThrowError("drbd%d: DRBD disk missing network info in"
2007
                  " DisconnectNet()", self.minor)
2008

    
2009
    class _DisconnectStatus:
2010
      def __init__(self, ever_disconnected):
2011
        self.ever_disconnected = ever_disconnected
2012

    
2013
    dstatus = _DisconnectStatus(_IgnoreError(self._ShutdownNet, self.minor))
2014

    
2015
    def _WaitForDisconnect():
2016
      if self.GetProcStatus().is_standalone:
2017
        return
2018

    
2019
      # retry the disconnect, it seems possible that due to a well-timed
2020
      # disconnect on the peer, my disconnect command might be ignored and
2021
      # forgotten
2022
      dstatus.ever_disconnected = \
2023
        _IgnoreError(self._ShutdownNet, self.minor) or dstatus.ever_disconnected
2024

    
2025
      raise utils.RetryAgain()
2026

    
2027
    # Keep start time
2028
    start_time = time.time()
2029

    
2030
    try:
2031
      # Start delay at 100 milliseconds and grow up to 2 seconds
2032
      utils.Retry(_WaitForDisconnect, (0.1, 1.5, 2.0),
2033
                  self._NET_RECONFIG_TIMEOUT)
2034
    except utils.RetryTimeout:
2035
      if dstatus.ever_disconnected:
2036
        msg = ("drbd%d: device did not react to the"
2037
               " 'disconnect' command in a timely manner")
2038
      else:
2039
        msg = "drbd%d: can't shutdown network, even after multiple retries"
2040

    
2041
      _ThrowError(msg, self.minor)
2042

    
2043
    reconfig_time = time.time() - start_time
2044
    if reconfig_time > (self._NET_RECONFIG_TIMEOUT * 0.25):
2045
      logging.info("drbd%d: DisconnectNet: detach took %.3f seconds",
2046
                   self.minor, reconfig_time)
2047

    
2048
  def AttachNet(self, multimaster):
2049
    """Reconnects the network.
2050

2051
    This method connects the network side of the device with a
2052
    specified multi-master flag. The device needs to be 'Standalone'
2053
    but have valid network configuration data.
2054

2055
    @type multimaster: boolean
2056
    @param multimaster: init the network in dual-primary mode
2057

2058
    """
2059
    if self.minor is None:
2060
      _ThrowError("drbd%d: device not attached in AttachNet", self._aminor)
2061

    
2062
    if None in (self._lhost, self._lport, self._rhost, self._rport):
2063
      _ThrowError("drbd%d: missing network info in AttachNet()", self.minor)
2064

    
2065
    status = self.GetProcStatus()
2066

    
2067
    if not status.is_standalone:
2068
      _ThrowError("drbd%d: device is not standalone in AttachNet", self.minor)
2069

    
2070
    self._AssembleNet(self.minor,
2071
                      (self._lhost, self._lport, self._rhost, self._rport),
2072
                      constants.DRBD_NET_PROTOCOL, dual_pri=multimaster,
2073
                      hmac=constants.DRBD_HMAC_ALG, secret=self._secret)
2074

    
2075
  def Attach(self):
2076
    """Check if our minor is configured.
2077

2078
    This doesn't do any device configurations - it only checks if the
2079
    minor is in a state different from Unconfigured.
2080

2081
    Note that this function will not change the state of the system in
2082
    any way (except in case of side-effects caused by reading from
2083
    /proc).
2084

2085
    """
2086
    used_devs = self.GetUsedDevs()
2087
    if self._aminor in used_devs:
2088
      minor = self._aminor
2089
    else:
2090
      minor = None
2091

    
2092
    self._SetFromMinor(minor)
2093
    return minor is not None
2094

    
2095
  def Assemble(self):
2096
    """Assemble the drbd.
2097

2098
    Method:
2099
      - if we have a configured device, we try to ensure that it matches
2100
        our config
2101
      - if not, we create it from zero
2102
      - anyway, set the device parameters
2103

2104
    """
2105
    super(DRBD8, self).Assemble()
2106

    
2107
    self.Attach()
2108
    if self.minor is None:
2109
      # local device completely unconfigured
2110
      self._FastAssemble()
2111
    else:
2112
      # we have to recheck the local and network status and try to fix
2113
      # the device
2114
      self._SlowAssemble()
2115

    
2116
    sync_errors = self.SetSyncParams(self.params)
2117
    if sync_errors:
2118
      _ThrowError("drbd%d: can't set the synchronization parameters: %s" %
2119
                  (self.minor, utils.CommaJoin(sync_errors)))
2120

    
2121
  def _SlowAssemble(self):
2122
    """Assembles the DRBD device from a (partially) configured device.
2123

2124
    In case of partially attached (local device matches but no network
2125
    setup), we perform the network attach. If successful, we re-test
2126
    the attach and, if it now matches, return success.
2127

2128
    """
2129
    # TODO: Rewrite to not use a for loop just because there is 'break'
2130
    # pylint: disable=W0631
2131
    net_data = (self._lhost, self._lport, self._rhost, self._rport)
2132
    for minor in (self._aminor,):
2133
      info = self._GetDevInfo(self._GetShowData(minor))
2134
      match_l = self._MatchesLocal(info)
2135
      match_r = self._MatchesNet(info)
2136

    
2137
      if match_l and match_r:
2138
        # everything matches
2139
        break
2140

    
2141
      if match_l and not match_r and "local_addr" not in info:
2142
        # disk matches, but not attached to network, attach and recheck
2143
        self._AssembleNet(minor, net_data, constants.DRBD_NET_PROTOCOL,
2144
                          hmac=constants.DRBD_HMAC_ALG, secret=self._secret)
2145
        if self._MatchesNet(self._GetDevInfo(self._GetShowData(minor))):
2146
          break
2147
        else:
2148
          _ThrowError("drbd%d: network attach successful, but 'drbdsetup"
2149
                      " show' disagrees", minor)
2150

    
2151
      if match_r and "local_dev" not in info:
2152
        # no local disk, but network attached and it matches
2153
        self._AssembleLocal(minor, self._children[0].dev_path,
2154
                            self._children[1].dev_path, self.size)
2155
        if self._MatchesNet(self._GetDevInfo(self._GetShowData(minor))):
2156
          break
2157
        else:
2158
          _ThrowError("drbd%d: disk attach successful, but 'drbdsetup"
2159
                      " show' disagrees", minor)
2160

    
2161
      # this case must be considered only if we actually have local
2162
      # storage, i.e. not in diskless mode, because all diskless
2163
      # devices are equal from the point of view of local
2164
      # configuration
2165
      if (match_l and "local_dev" in info and
2166
          not match_r and "local_addr" in info):
2167
        # strange case - the device network part points to somewhere
2168
        # else, even though its local storage is ours; as we own the
2169
        # drbd space, we try to disconnect from the remote peer and
2170
        # reconnect to our correct one
2171
        try:
2172
          self._ShutdownNet(minor)
2173
        except errors.BlockDeviceError, err:
2174
          _ThrowError("drbd%d: device has correct local storage, wrong"
2175
                      " remote peer and is unable to disconnect in order"
2176
                      " to attach to the correct peer: %s", minor, str(err))
2177
        # note: _AssembleNet also handles the case when we don't want
2178
        # local storage (i.e. one or more of the _[lr](host|port) is
2179
        # None)
2180
        self._AssembleNet(minor, net_data, constants.DRBD_NET_PROTOCOL,
2181
                          hmac=constants.DRBD_HMAC_ALG, secret=self._secret)
2182
        if self._MatchesNet(self._GetDevInfo(self._GetShowData(minor))):
2183
          break
2184
        else:
2185
          _ThrowError("drbd%d: network attach successful, but 'drbdsetup"
2186
                      " show' disagrees", minor)
2187

    
2188
    else:
2189
      minor = None
2190

    
2191
    self._SetFromMinor(minor)
2192
    if minor is None:
2193
      _ThrowError("drbd%d: cannot activate, unknown or unhandled reason",
2194
                  self._aminor)
2195

    
2196
  def _FastAssemble(self):
2197
    """Assemble the drbd device from zero.
2198

2199
    This is run when in Assemble we detect our minor is unused.
2200

2201
    """
2202
    minor = self._aminor
2203
    if self._children and self._children[0] and self._children[1]:
2204
      self._AssembleLocal(minor, self._children[0].dev_path,
2205
                          self._children[1].dev_path, self.size)
2206
    if self._lhost and self._lport and self._rhost and self._rport:
2207
      self._AssembleNet(minor,
2208
                        (self._lhost, self._lport, self._rhost, self._rport),
2209
                        constants.DRBD_NET_PROTOCOL,
2210
                        hmac=constants.DRBD_HMAC_ALG, secret=self._secret)
2211
    self._SetFromMinor(minor)
2212

    
2213
  @classmethod
2214
  def _ShutdownLocal(cls, minor):
2215
    """Detach from the local device.
2216

2217
    I/Os will continue to be served from the remote device. If we
2218
    don't have a remote device, this operation will fail.
2219

2220
    """
2221
    result = utils.RunCmd(["drbdsetup", cls._DevPath(minor), "detach"])
2222
    if result.failed:
2223
      _ThrowError("drbd%d: can't detach local disk: %s", minor, result.output)
2224

    
2225
  @classmethod
2226
  def _ShutdownNet(cls, minor):
2227
    """Disconnect from the remote peer.
2228

2229
    This fails if we don't have a local device.
2230

2231
    """
2232
    result = utils.RunCmd(["drbdsetup", cls._DevPath(minor), "disconnect"])
2233
    if result.failed:
2234
      _ThrowError("drbd%d: can't shutdown network: %s", minor, result.output)
2235

    
2236
  @classmethod
2237
  def _ShutdownAll(cls, minor):
2238
    """Deactivate the device.
2239

2240
    This will, of course, fail if the device is in use.
2241

2242
    """
2243
    result = utils.RunCmd(["drbdsetup", cls._DevPath(minor), "down"])
2244
    if result.failed:
2245
      _ThrowError("drbd%d: can't shutdown drbd device: %s",
2246
                  minor, result.output)
2247

    
2248
  def Shutdown(self):
2249
    """Shutdown the DRBD device.
2250

2251
    """
2252
    if self.minor is None and not self.Attach():
2253
      logging.info("drbd%d: not attached during Shutdown()", self._aminor)
2254
      return
2255
    minor = self.minor
2256
    self.minor = None
2257
    self.dev_path = None
2258
    self._ShutdownAll(minor)
2259

    
2260
  def Remove(self):
2261
    """Stub remove for DRBD devices.
2262

2263
    """
2264
    self.Shutdown()
2265

    
2266
  @classmethod
2267
  def Create(cls, unique_id, children, size, params, excl_stor):
2268
    """Create a new DRBD8 device.
2269

2270
    Since DRBD devices are not created per se, just assembled, this
2271
    function only initializes the metadata.
2272

2273
    """
2274
    if len(children) != 2:
2275
      raise errors.ProgrammerError("Invalid setup for the drbd device")
2276
    if excl_stor:
2277
      raise errors.ProgrammerError("DRBD device requested with"
2278
                                   " exclusive_storage")
2279
    # check that the minor is unused
2280
    aminor = unique_id[4]
2281
    proc_info = cls._MassageProcData(cls._GetProcData())
2282
    if aminor in proc_info:
2283
      status = DRBD8Status(proc_info[aminor])
2284
      in_use = status.is_in_use
2285
    else:
2286
      in_use = False
2287
    if in_use:
2288
      _ThrowError("drbd%d: minor is already in use at Create() time", aminor)
2289
    meta = children[1]
2290
    meta.Assemble()
2291
    if not meta.Attach():
2292
      _ThrowError("drbd%d: can't attach to meta device '%s'",
2293
                  aminor, meta)
2294
    cls._CheckMetaSize(meta.dev_path)
2295
    cls._InitMeta(aminor, meta.dev_path)
2296
    return cls(unique_id, children, size, params)
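  # The unique_id is assumed (as used above, with unique_id[4] being the
  # minor) to be a 6-tuple of the form
  #   (lhost_ip, lport, rhost_ip, rport, minor, secret)
  # e.g. ("192.0.2.1", 11000, "192.0.2.2", 11000, 0, "s3cr3t") -- values
  # illustrative only.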
2297

    
2298
  def Grow(self, amount, dryrun, backingstore):
2299
    """Resize the DRBD device and its backing storage.
2300

2301
    """
2302
    if self.minor is None:
2303
      _ThrowError("drbd%d: Grow called while not attached", self._aminor)
2304
    if len(self._children) != 2 or None in self._children:
2305
      _ThrowError("drbd%d: cannot grow diskless device", self.minor)
2306
    self._children[0].Grow(amount, dryrun, backingstore)
2307
    if dryrun or backingstore:
2308
      # DRBD does not support dry-run mode, and the DRBD device itself is
2309
      # not backing storage, so we return here
2310
      return
2311
    result = utils.RunCmd(["drbdsetup", self.dev_path, "resize", "-s",
2312
                           "%dm" % (self.size + amount)])
2313
    if result.failed:
2314
      _ThrowError("drbd%d: resize failed: %s", self.minor, result.output)
2315

    
2316

    
2317
class FileStorage(BlockDev):
2318
  """File device.
2319

2320
  This class represents a file storage backend device.
2321

2322
  The unique_id for the file device is a (file_driver, file_path) tuple.
2323

2324
  """
2325
  def __init__(self, unique_id, children, size, params):
2326
    """Initalizes a file device backend.
2327

2328
    """
2329
    if children:
2330
      raise errors.BlockDeviceError("Invalid setup for file device")
2331
    super(FileStorage, self).__init__(unique_id, children, size, params)
2332
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
2333
      raise ValueError("Invalid configuration data %s" % str(unique_id))
2334
    self.driver = unique_id[0]
2335
    self.dev_path = unique_id[1]
2336

    
2337
    CheckFileStoragePath(self.dev_path)
2338

    
2339
    self.Attach()
2340

    
2341
  def Assemble(self):
2342
    """Assemble the device.
2343

2344
    Checks that the file device exists; raises BlockDeviceError otherwise.
2345

2346
    """
2347
    if not os.path.exists(self.dev_path):
2348
      _ThrowError("File device '%s' does not exist" % self.dev_path)
2349

    
2350
  def Shutdown(self):
2351
    """Shutdown the device.
2352

2353
    This is a no-op for the file type, as we don't deactivate
2354
    the file on shutdown.
2355

2356
    """
2357
    pass
2358

    
2359
  def Open(self, force=False):
2360
    """Make the device ready for I/O.
2361

2362
    This is a no-op for the file type.
2363

2364
    """
2365
    pass
2366

    
2367
  def Close(self):
2368
    """Notifies that the device will no longer be used for I/O.
2369

2370
    This is a no-op for the file type.
2371

2372
    """
2373
    pass
2374

    
2375
  def Remove(self):
2376
    """Remove the file backing the block device.
2377

2378
    @rtype: boolean
2379
    @return: True if the removal was successful
2380

2381
    """
2382
    try:
2383
      os.remove(self.dev_path)
2384
    except OSError, err:
2385
      if err.errno != errno.ENOENT:
2386
        _ThrowError("Can't remove file '%s': %s", self.dev_path, err)
2387

    
2388
  def Rename(self, new_id):
2389
    """Renames the file.
2390

2391
    """
2392
    # TODO: implement rename for file-based storage
2393
    _ThrowError("Rename is not supported for file-based storage")
2394

    
2395
  def Grow(self, amount, dryrun, backingstore):
2396
    """Grow the file
2397

2398
    @param amount: the amount (in mebibytes) to grow with
2399

2400
    """
2401
    if not backingstore:
2402
      return
2403
    # Check that the file exists
2404
    self.Assemble()
2405
    current_size = self.GetActualSize()
2406
    new_size = current_size + amount * 1024 * 1024
2407
    assert new_size > current_size, "Cannot Grow with a negative amount"
2408
    # We can't really simulate the growth
2409
    if dryrun:
2410
      return
2411
    try:
2412
      f = open(self.dev_path, "a+")
2413
      f.truncate(new_size)
2414
      f.close()
2415
    except EnvironmentError, err:
2416
      _ThrowError("Error in file growth: %", str(err))
2417

    
2418
  def Attach(self):
2419
    """Attach to an existing file.
2420

2421
    Check if this file already exists.
2422

2423
    @rtype: boolean
2424
    @return: True if file exists
2425

2426
    """
2427
    self.attached = os.path.exists(self.dev_path)
2428
    return self.attached
2429

    
2430
  def GetActualSize(self):
2431
    """Return the actual disk size.
2432

2433
    @note: the device needs to be active when this is called
2434

2435
    """
2436
    assert self.attached, "BlockDevice not attached in GetActualSize()"
2437
    try:
2438
      st = os.stat(self.dev_path)
2439
      return st.st_size
2440
    except OSError, err:
2441
      _ThrowError("Can't stat %s: %s", self.dev_path, err)
2442

    
2443
  @classmethod
2444
  def Create(cls, unique_id, children, size, params, excl_stor):
2445
    """Create a new file.
2446

2447
    @param size: the size of file in MiB
2448

2449
    @rtype: L{bdev.FileStorage}
2450
    @return: an instance of FileStorage
2451

2452
    """
2453
    if excl_stor:
2454
      raise errors.ProgrammerError("FileStorage device requested with"
2455
                                   " exclusive_storage")
2456
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
2457
      raise ValueError("Invalid configuration data %s" % str(unique_id))
2458

    
2459
    dev_path = unique_id[1]
2460

    
2461
    CheckFileStoragePath(dev_path)
2462

    
2463
    try:
2464
      fd = os.open(dev_path, os.O_RDWR | os.O_CREAT | os.O_EXCL)
2465
      f = os.fdopen(fd, "w")
2466
      f.truncate(size * 1024 * 1024)
2467
      f.close()
2468
    except EnvironmentError, err:
2469
      if err.errno == errno.EEXIST:
2470
        _ThrowError("File already existing: %s", dev_path)
2471
      _ThrowError("Error in file creation: %", str(err))
2472

    
2473
    return FileStorage(unique_id, children, size, params)
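  # Usage sketch (driver name and path assumed, not from a real cluster):
  #   dev = FileStorage.Create(("loop", "/srv/ganeti/file-storage/disk0"),
  #                            [], 1024, {}, False)
  #   dev.Grow(512, False, True)  # extend the backing file by 512 MiB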
2474

    
2475

    
2476
class PersistentBlockDevice(BlockDev):
2477
  """A block device with persistent node
2478

2479
  May be either directly attached, or exposed through DM (e.g. dm-multipath).
2480
  udev helpers are probably required to give persistent, human-friendly
2481
  names.
2482

2483
  For the time being, pathnames are required to lie under /dev.
2484

2485
  """
2486
  def __init__(self, unique_id, children, size, params):
2487
    """Attaches to a static block device.
2488

2489
    The unique_id is a path under /dev.
2490

2491
    """
2492
    super(PersistentBlockDevice, self).__init__(unique_id, children, size,
2493
                                                params)
2494
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
2495
      raise ValueError("Invalid configuration data %s" % str(unique_id))
2496
    self.dev_path = unique_id[1]
2497
    if not os.path.realpath(self.dev_path).startswith("/dev/"):
2498
      raise ValueError("Full path '%s' lies outside /dev" %
2499
                              os.path.realpath(self.dev_path))
2500
    # TODO: this is just a safety guard checking that we only deal with devices
2501
    # we know how to handle. In the future this will be integrated with
2502
    # external storage backends and possible values will probably be collected
2503
    # from the cluster configuration.
2504
    if unique_id[0] != constants.BLOCKDEV_DRIVER_MANUAL:
2505
      raise ValueError("Got persistent block device of invalid type: %s" %
2506
                       unique_id[0])
2507

    
2508
    self.major = self.minor = None
2509
    self.Attach()
2510

    
2511
  @classmethod
2512
  def Create(cls, unique_id, children, size, params, excl_stor):
2513
    """Create a new device
2514

2515
    This is a noop, we only return a PersistentBlockDevice instance
2516

2517
    """
2518
    if excl_stor:
2519
      raise errors.ProgrammerError("Persistent block device requested with"
2520
                                   " exclusive_storage")
2521
    return PersistentBlockDevice(unique_id, children, 0, params)
2522

    
2523
  def Remove(self):
2524
    """Remove a device
2525

2526
    This is a noop
2527

2528
    """
2529
    pass
2530

    
2531
  def Rename(self, new_id):
2532
    """Rename this device.
2533

2534
    """
2535
    _ThrowError("Rename is not supported for PersistentBlockDev storage")
2536

    
2537
  def Attach(self):
2538
    """Attach to an existing block device.
2539

2540

2541
    """
2542
    self.attached = False
2543
    try:
2544
      st = os.stat(self.dev_path)
2545
    except OSError, err:
2546
      logging.error("Error stat()'ing %s: %s", self.dev_path, str(err))
2547
      return False
2548

    
2549
    if not stat.S_ISBLK(st.st_mode):
2550
      logging.error("%s is not a block device", self.dev_path)
2551
      return False
2552

    
2553
    self.major = os.major(st.st_rdev)
2554
    self.minor = os.minor(st.st_rdev)
2555
    self.attached = True
2556

    
2557
    return True
2558

    
2559
  def Assemble(self):
2560
    """Assemble the device.
2561

2562
    """
2563
    pass
2564

    
2565
  def Shutdown(self):
2566
    """Shutdown the device.
2567

2568
    """
2569
    pass
2570

    
2571
  def Open(self, force=False):
2572
    """Make the device ready for I/O.
2573

2574
    """
2575
    pass
2576

    
2577
  def Close(self):
2578
    """Notifies that the device will no longer be used for I/O.
2579

2580
    """
2581
    pass
2582

    
2583
  def Grow(self, amount, dryrun, backingstore):
2584
    """Grow the logical volume.
2585

2586
    """
2587
    _ThrowError("Grow is not supported for PersistentBlockDev storage")
2588

    
2589

    
2590
class RADOSBlockDevice(BlockDev):
2591
  """A RADOS Block Device (rbd).
2592

2593
  This class implements the RADOS Block Device for the backend. You need
2594
  the rbd kernel driver, the RADOS Tools and a working RADOS cluster for
2595
  this to be functional.
2596

2597
  """
2598
  def __init__(self, unique_id, children, size, params):
2599
    """Attaches to an rbd device.
2600

2601
    """
2602
    super(RADOSBlockDevice, self).__init__(unique_id, children, size, params)
2603
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
2604
      raise ValueError("Invalid configuration data %s" % str(unique_id))
2605

    
2606
    self.driver, self.rbd_name = unique_id
2607

    
2608
    self.major = self.minor = None
2609
    self.Attach()
2610

    
2611
  @classmethod
2612
  def Create(cls, unique_id, children, size, params, excl_stor):
2613
    """Create a new rbd device.
2614

2615
    Provision a new rbd volume inside a RADOS pool.
2616

2617
    """
2618
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
2619
      raise errors.ProgrammerError("Invalid configuration data %s" %
2620
                                   str(unique_id))
2621
    if excl_stor:
2622
      raise errors.ProgrammerError("RBD device requested with"
2623
                                   " exclusive_storage")
2624
    rbd_pool = params[constants.LDP_POOL]
2625
    rbd_name = unique_id[1]
2626

    
2627
    # Provision a new rbd volume (Image) inside the RADOS cluster.
2628
    cmd = [constants.RBD_CMD, "create", "-p", rbd_pool,
2629
           rbd_name, "--size", "%s" % size]
2630
    result = utils.RunCmd(cmd)
2631
    if result.failed:
2632
      _ThrowError("rbd creation failed (%s): %s",
2633
                  result.fail_reason, result.output)
2634

    
2635
    return RADOSBlockDevice(unique_id, children, size, params)
2636

    
2637
  def Remove(self):
2638
    """Remove the rbd device.
2639

2640
    """
2641
    rbd_pool = self.params[constants.LDP_POOL]
2642
    rbd_name = self.unique_id[1]
2643

    
2644
    if not self.minor and not self.Attach():
2645
      # The rbd device doesn't exist.
2646
      return
2647

    
2648
    # First shutdown the device (remove mappings).
2649
    self.Shutdown()
2650

    
2651
    # Remove the actual Volume (Image) from the RADOS cluster.
2652
    cmd = [constants.RBD_CMD, "rm", "-p", rbd_pool, rbd_name]
2653
    result = utils.RunCmd(cmd)
2654
    if result.failed:
2655
      _ThrowError("Can't remove Volume from cluster with rbd rm: %s - %s",
2656
                  result.fail_reason, result.output)
2657

    
2658
  def Rename(self, new_id):
2659
    """Rename this device.
2660

2661
    """
2662
    pass
2663

    
2664
  def Attach(self):
2665
    """Attach to an existing rbd device.
2666

2667
    This method maps the rbd volume that matches our name with
2668
    an rbd device and then attaches to this device.
2669

2670
    """
2671
    self.attached = False
2672

    
2673
    # Map the rbd volume to a block device under /dev
2674
    self.dev_path = self._MapVolumeToBlockdev(self.unique_id)
2675

    
2676
    try:
2677
      st = os.stat(self.dev_path)
2678
    except OSError, err:
2679
      logging.error("Error stat()'ing %s: %s", self.dev_path, str(err))
2680
      return False
2681

    
2682
    if not stat.S_ISBLK(st.st_mode):
2683
      logging.error("%s is not a block device", self.dev_path)
2684
      return False
2685

    
2686
    self.major = os.major(st.st_rdev)
2687
    self.minor = os.minor(st.st_rdev)
2688
    self.attached = True
2689

    
2690
    return True
2691

    
2692
  def _MapVolumeToBlockdev(self, unique_id):
2693
    """Maps existing rbd volumes to block devices.
2694

2695
    This method should be idempotent if the mapping already exists.
2696

2697
    @rtype: string
2698
    @return: the block device path that corresponds to the volume
2699

2700
    """
2701
    pool = self.params[constants.LDP_POOL]
2702
    name = unique_id[1]
2703

    
2704
    # Check if the mapping already exists.
2705
    showmap_cmd = [constants.RBD_CMD, "showmapped", "-p", pool]
2706
    result = utils.RunCmd(showmap_cmd)
2707
    if result.failed:
2708
      _ThrowError("rbd showmapped failed (%s): %s",
2709
                  result.fail_reason, result.output)
2710

    
2711
    rbd_dev = self._ParseRbdShowmappedOutput(result.output, name)
2712

    
2713
    if rbd_dev:
2714
      # The mapping exists. Return it.
2715
      return rbd_dev
2716

    
2717
    # The mapping doesn't exist. Create it.
2718
    map_cmd = [constants.RBD_CMD, "map", "-p", pool, name]
2719
    result = utils.RunCmd(map_cmd)
2720
    if result.failed:
2721
      _ThrowError("rbd map failed (%s): %s",
2722
                  result.fail_reason, result.output)
2723

    
2724
    # Find the corresponding rbd device.
2725
    showmap_cmd = [constants.RBD_CMD, "showmapped", "-p", pool]
2726
    result = utils.RunCmd(showmap_cmd)
2727
    if result.failed:
2728
      _ThrowError("rbd map succeeded, but showmapped failed (%s): %s",
2729
                  result.fail_reason, result.output)
2730

    
2731
    rbd_dev = self._ParseRbdShowmappedOutput(result.output, name)
2732

    
2733
    if not rbd_dev:
2734
      _ThrowError("rbd map succeeded, but could not find the rbd block"
2735
                  " device in output of showmapped, for volume: %s", name)
2736

    
2737
    # The device was successfully mapped. Return it.
2738
    return rbd_dev
2739

    
2740
  @staticmethod
2741
  def _ParseRbdShowmappedOutput(output, volume_name):
2742
    """Parse the output of `rbd showmapped'.
2743

2744
    This method parses the output of `rbd showmapped' and returns
2745
    the rbd block device path (e.g. /dev/rbd0) that matches the
2746
    given rbd volume.
2747

2748
    @type output: string
2749
    @param output: the whole output of `rbd showmapped'
2750
    @type volume_name: string
2751
    @param volume_name: the name of the volume whose device we search for
2752
    @rtype: string or None
2753
    @return: block device path if the volume is mapped, else None
2754

2755
    """
2756
    allfields = 5
2757
    volumefield = 2
2758
    devicefield = 4
2759

    
2760
    field_sep = "\t"
2761

    
2762
    lines = output.splitlines()
2763
    splitted_lines = map(lambda l: l.split(field_sep), lines)
2764

    
2765
    # Check empty output.
2766
    if not splitted_lines:
2767
      _ThrowError("rbd showmapped returned empty output")
2768

    
2769
    # Check showmapped header line, to determine number of fields.
2770
    field_cnt = len(splitted_lines[0])
2771
    if field_cnt != allfields:
2772
      _ThrowError("Cannot parse rbd showmapped output because its format"
2773
                  " seems to have changed; expected %s fields, found %s",
2774
                  allfields, field_cnt)
2775

    
2776
    matched_lines = \
2777
      filter(lambda l: len(l) == allfields and l[volumefield] == volume_name,
2778
             splitted_lines)
2779

    
2780
    if len(matched_lines) > 1:
2781
      _ThrowError("The rbd volume %s is mapped more than once."
2782
                  " This shouldn't happen, try to unmap the extra"
2783
                  " devices manually.", volume_name)
2784

    
2785
    if matched_lines:
2786
      # rbd block device found. Return it.
2787
      rbd_dev = matched_lines[0][devicefield]
2788
      return rbd_dev
2789

    
2790
    # The given volume is not mapped.
2791
    return None
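  # Assumed example of the tab-separated "rbd showmapped" layout this parser
  # expects (header plus one line per mapping; column names illustrative):
  #   id<TAB>pool<TAB>image<TAB>snap<TAB>device
  #   0<TAB>rbd<TAB>vol0<TAB>-<TAB>/dev/rbd0
  # For volume_name "vol0" the method would return "/dev/rbd0".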
2792

    
2793
  def Assemble(self):
2794
    """Assemble the device.
2795

2796
    """
2797
    pass
2798

    
2799
  def Shutdown(self):
2800
    """Shutdown the device.
2801

2802
    """
2803
    if not self.minor and not self.Attach():
2804
      # The rbd device doesn't exist.
2805
      return
2806

    
2807
    # Unmap the block device from the Volume.
2808
    self._UnmapVolumeFromBlockdev(self.unique_id)
2809

    
2810
    self.minor = None
2811
    self.dev_path = None
2812

    
2813
  def _UnmapVolumeFromBlockdev(self, unique_id):
2814
    """Unmaps the rbd device from the Volume it is mapped.
2815

2816
    Unmaps the rbd device from the Volume it was previously mapped to.
2817
    This method should be idempotent if the Volume isn't mapped.
2818

2819
    """
2820
    pool = self.params[constants.LDP_POOL]
2821
    name = unique_id[1]
2822

    
2823
    # Check if the mapping already exists.
2824
    showmap_cmd = [constants.RBD_CMD, "showmapped", "-p", pool]
2825
    result = utils.RunCmd(showmap_cmd)
2826
    if result.failed:
2827
      _ThrowError("rbd showmapped failed [during unmap](%s): %s",
2828
                  result.fail_reason, result.output)
2829

    
2830
    rbd_dev = self._ParseRbdShowmappedOutput(result.output, name)
2831

    
2832
    if rbd_dev:
2833
      # The mapping exists. Unmap the rbd device.
2834
      unmap_cmd = [constants.RBD_CMD, "unmap", "%s" % rbd_dev]
2835
      result = utils.RunCmd(unmap_cmd)
2836
      if result.failed:
2837
        _ThrowError("rbd unmap failed (%s): %s",
2838
                    result.fail_reason, result.output)
2839

    
2840
  def Open(self, force=False):
2841
    """Make the device ready for I/O.
2842

2843
    """
2844
    pass
2845

    
2846
  def Close(self):
2847
    """Notifies that the device will no longer be used for I/O.
2848

2849
    """
2850
    pass
2851

    
2852
  def Grow(self, amount, dryrun, backingstore):
2853
    """Grow the Volume.
2854

2855
    @type amount: integer
2856
    @param amount: the amount (in mebibytes) to grow with
2857
    @type dryrun: boolean
2858
    @param dryrun: whether to execute the operation in simulation mode
2859
        only, without actually increasing the size
2860

2861
    """
2862
    if not backingstore:
2863
      return
2864
    if not self.Attach():
2865
      _ThrowError("Can't attach to rbd device during Grow()")
2866

    
2867
    if dryrun:
2868
      # the rbd tool does not support dry runs of resize operations.
2869
      # Since rbd volumes are thinly provisioned, we assume
2870
      # there is always enough free space for the operation.
2871
      return
2872

    
2873
    rbd_pool = self.params[constants.LDP_POOL]
2874
    rbd_name = self.unique_id[1]
2875
    new_size = self.size + amount
2876

    
2877
    # Resize the rbd volume (Image) inside the RADOS cluster.
2878
    cmd = [constants.RBD_CMD, "resize", "-p", rbd_pool,
2879
           rbd_name, "--size", "%s" % new_size]
2880
    result = utils.RunCmd(cmd)
2881
    if result.failed:
2882
      _ThrowError("rbd resize failed (%s): %s",
2883
                  result.fail_reason, result.output)
2884

    
2885

    
2886
class ExtStorageDevice(BlockDev):
2887
  """A block device provided by an ExtStorage Provider.
2888

2889
  This class implements the External Storage Interface, which means
2890
  handling of the externally provided block devices.
2891

2892
  """
2893
  def __init__(self, unique_id, children, size, params):
2894
    """Attaches to an extstorage block device.
2895

2896
    """
2897
    super(ExtStorageDevice, self).__init__(unique_id, children, size, params)
2898
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
2899
      raise ValueError("Invalid configuration data %s" % str(unique_id))
2900

    
2901
    self.driver, self.vol_name = unique_id
2902
    self.ext_params = params
2903

    
2904
    self.major = self.minor = None
2905
    self.Attach()
2906

    
2907
  @classmethod
2908
  def Create(cls, unique_id, children, size, params, excl_stor):
2909
    """Create a new extstorage device.
2910

2911
    Provision a new volume using an extstorage provider, which will
2912
    then be mapped to a block device.
2913

2914
    """
2915
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
2916
      raise errors.ProgrammerError("Invalid configuration data %s" %
2917
                                   str(unique_id))
2918
    if excl_stor:
2919
      raise errors.ProgrammerError("extstorage device requested with"
2920
                                   " exclusive_storage")
2921

    
2922
    # Call the External Storage's create script,
2923
    # to provision a new Volume inside the External Storage
2924
    _ExtStorageAction(constants.ES_ACTION_CREATE, unique_id,
2925
                      params, str(size))
2926

    
2927
    return ExtStorageDevice(unique_id, children, size, params)
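  # Usage sketch with assumed provider and volume names (the provider's
  # scripts must exist on the node for this to work):
  #   dev = ExtStorageDevice.Create(("myprovider", "vol-0001"), [], 2048,
  #                                 {"opt1": "val1"}, False)
  # which ends up running the provider's create script with size "2048".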
2928

    
2929
  def Remove(self):
2930
    """Remove the extstorage device.
2931

2932
    """
2933
    if not self.minor and not self.Attach():
2934
      # The extstorage device doesn't exist.
2935
      return
2936

    
2937
    # First shutdown the device (remove mappings).
2938
    self.Shutdown()
2939

    
2940
    # Call the External Storage's remove script,
2941
    # to remove the Volume from the External Storage
2942
    _ExtStorageAction(constants.ES_ACTION_REMOVE, self.unique_id,
2943
                      self.ext_params)
2944

    
2945
  def Rename(self, new_id):
2946
    """Rename this device.
2947

2948
    """
2949
    pass
2950

    
2951
  def Attach(self):
2952
    """Attach to an existing extstorage device.
2953

2954
    This method maps the extstorage volume that matches our name with
2955
    a corresponding block device and then attaches to this device.
2956

2957
    """
2958
    self.attached = False
2959

    
2960
    # Call the External Storage's attach script,
2961
    # to attach an existing Volume to a block device under /dev
2962
    self.dev_path = _ExtStorageAction(constants.ES_ACTION_ATTACH,
2963
                                      self.unique_id, self.ext_params)
2964

    
2965
    try:
2966
      st = os.stat(self.dev_path)
2967
    except OSError, err:
2968
      logging.error("Error stat()'ing %s: %s", self.dev_path, str(err))
2969
      return False
2970

    
2971
    if not stat.S_ISBLK(st.st_mode):
2972
      logging.error("%s is not a block device", self.dev_path)
2973
      return False
2974

    
2975
    self.major = os.major(st.st_rdev)
2976
    self.minor = os.minor(st.st_rdev)
2977
    self.attached = True
2978

    
2979
    return True
2980

    
2981
  def Assemble(self):
2982
    """Assemble the device.
2983

2984
    """
2985
    pass
2986

    
2987
  def Shutdown(self):
2988
    """Shutdown the device.
2989

2990
    """
2991
    if not self.minor and not self.Attach():
2992
      # The extstorage device doesn't exist.
2993
      return
2994

    
2995
    # Call the External Storage's detach script,
2996
    # to detach an existing Volume from its block device under /dev
2997
    _ExtStorageAction(constants.ES_ACTION_DETACH, self.unique_id,
2998
                      self.ext_params)
2999

    
3000
    self.minor = None
3001
    self.dev_path = None
3002

    
3003
  def Open(self, force=False):
3004
    """Make the device ready for I/O.
3005

3006
    """
3007
    pass
3008

    
3009
  def Close(self):
3010
    """Notifies that the device will no longer be used for I/O.
3011

3012
    """
3013
    pass

  def Grow(self, amount, dryrun, backingstore):
    """Grow the Volume.

    @type amount: integer
    @param amount: the amount (in mebibytes) to grow with
    @type dryrun: boolean
    @param dryrun: whether to execute the operation in simulation mode
        only, without actually increasing the size
    @type backingstore: boolean
    @param backingstore: whether the operation targets the backing storage;
        the external Volume is only grown when this is True

    """
    if not backingstore:
      return
    if not self.Attach():
      _ThrowError("Can't attach to extstorage device during Grow()")

    if dryrun:
      # we do not support dry runs of resize operations for now.
      return

    new_size = self.size + amount

    # Call the External Storage's grow script,
    # to grow an existing Volume inside the External Storage
    _ExtStorageAction(constants.ES_ACTION_GROW, self.unique_id,
                      self.ext_params, str(self.size), grow=str(new_size))
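
  # Sketch of the resulting grow call (hypothetical numbers): growing a
  # 1024 MiB volume by 512 MiB passes the current size as `size' and the
  # new size via `grow', so the provider's grow script sees VOL_SIZE=1024
  # and VOL_NEW_SIZE=1536 in its environment:
  #   _ExtStorageAction(constants.ES_ACTION_GROW, self.unique_id,
  #                     self.ext_params, str(1024), grow=str(1536))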

  def SetInfo(self, text):
    """Update metadata with info text.

    """
    # Replace invalid characters
    text = re.sub("^[^A-Za-z0-9_+.]", "_", text)
    text = re.sub("[^-A-Za-z0-9_+.]", "_", text)

    # Only up to 128 characters are allowed
    text = text[:128]

    # Call the External Storage's setinfo script,
    # to set metadata for an existing Volume inside the External Storage
    _ExtStorageAction(constants.ES_ACTION_SETINFO, self.unique_id,
                      self.ext_params, metadata=text)
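
  # Illustrative example of the sanitization above (hypothetical input):
  # "-my instance.disk/0" first has its invalid leading character replaced,
  # then all remaining invalid characters, giving "_my_instance.disk_0";
  # the result is finally truncated to 128 characters before being handed
  # to the setinfo script as VOL_METADATA.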


def _ExtStorageAction(action, unique_id, ext_params,
                      size=None, grow=None, metadata=None):
  """Take an External Storage action.

  Take an External Storage action concerning or affecting
  a specific Volume inside the External Storage.

  @type action: string
  @param action: which action to perform. One of:
                 create / remove / grow / attach / detach / setinfo
  @type unique_id: tuple (driver, vol_name)
  @param unique_id: a tuple containing the type of ExtStorage (driver)
                    and the Volume name
  @type ext_params: dict
  @param ext_params: ExtStorage parameters
  @type size: integer
  @param size: the size of the Volume in mebibytes
  @type grow: integer
  @param grow: the new size in mebibytes (after grow)
  @type metadata: string
  @param metadata: metadata info of the Volume, for use by the provider
  @rtype: None or a block device path (during attach)

  """
  driver, vol_name = unique_id

  # Create an External Storage instance of type `driver'
  status, inst_es = ExtStorageFromDisk(driver)
  if not status:
    _ThrowError("%s" % inst_es)

  # Create the basic environment for the driver's scripts
  create_env = _ExtStorageEnvironment(unique_id, ext_params, size,
                                      grow, metadata)

  # Do not use log file for action `attach' as we need
  # to get the output from RunResult
  # TODO: find a way to have a log file for attach too
  logfile = None
  if action is not constants.ES_ACTION_ATTACH:
    logfile = _VolumeLogName(action, driver, vol_name)

  # Make sure the given action results in a valid script
  if action not in constants.ES_SCRIPTS:
    _ThrowError("Action '%s' doesn't result in a valid ExtStorage script" %
                action)

  # Find out which external script to run according to the given action
  script_name = action + "_script"
  script = getattr(inst_es, script_name)

  # Run the external script
  result = utils.RunCmd([script], env=create_env,
                        cwd=inst_es.path, output=logfile,)
  if result.failed:
    logging.error("External storage's %s command '%s' returned"
                  " error: %s, logfile: %s, output: %s",
                  action, result.cmd, result.fail_reason,
                  logfile, result.output)

    # If logfile is 'None' (during attach), it breaks TailFile
    # TODO: have a log file for attach too
    if action is not constants.ES_ACTION_ATTACH:
      lines = [utils.SafeEncode(val)
               for val in utils.TailFile(logfile, lines=20)]
    else:
      # No log file during attach; report the last lines of the captured
      # output instead
      lines = result.output.splitlines()[-20:]

    _ThrowError("External storage's %s script failed (%s), last"
                " lines of output:\n%s",
                action, result.fail_reason, "\n".join(lines))

  if action == constants.ES_ACTION_ATTACH:
    return result.stdout
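
# Usage sketch (hypothetical provider and volume names): creating a
# 1024 MiB volume "vol1" with provider "myprovider" resolves
# inst_es.create_script via getattr() and runs it with VOL_NAME, VOL_SIZE
# and any EXTP_* variables in its environment:
#   _ExtStorageAction(constants.ES_ACTION_CREATE, ("myprovider", "vol1"),
#                     {"redundancy": "2"}, str(1024))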


def ExtStorageFromDisk(name, base_dir=None):
  """Create an ExtStorage instance from disk.

  This function will return an ExtStorage instance
  if the given name is a valid ExtStorage name.

  @type base_dir: string
  @keyword base_dir: Base directory containing ExtStorage installations.
                     Defaults to a search in all the ES_SEARCH_PATH dirs.
  @rtype: tuple
  @return: True and the ExtStorage instance if we find a valid one, or
      False and the diagnose message on error

  """
  if base_dir is None:
    es_base_dir = pathutils.ES_SEARCH_PATH
  else:
    es_base_dir = [base_dir]

  es_dir = utils.FindFile(name, es_base_dir, os.path.isdir)

  if es_dir is None:
    return False, ("Directory for External Storage Provider %s not"
                   " found in search path" % name)

  # ES Files dictionary, we will populate it with the absolute path
  # names; if the value is True, then it is a required file, otherwise
  # an optional one
  es_files = dict.fromkeys(constants.ES_SCRIPTS, True)

  es_files[constants.ES_PARAMETERS_FILE] = True

  for (filename, _) in es_files.items():
    es_files[filename] = utils.PathJoin(es_dir, filename)

    try:
      st = os.stat(es_files[filename])
    except EnvironmentError, err:
      return False, ("File '%s' under path '%s' is missing (%s)" %
                     (filename, es_dir, utils.ErrnoOrStr(err)))

    if not stat.S_ISREG(stat.S_IFMT(st.st_mode)):
      return False, ("File '%s' under path '%s' is not a regular file" %
                     (filename, es_dir))

    if filename in constants.ES_SCRIPTS:
      if stat.S_IMODE(st.st_mode) & stat.S_IXUSR != stat.S_IXUSR:
        return False, ("File '%s' under path '%s' is not executable" %
                       (filename, es_dir))

  parameters = []
  if constants.ES_PARAMETERS_FILE in es_files:
    parameters_file = es_files[constants.ES_PARAMETERS_FILE]
    try:
      parameters = utils.ReadFile(parameters_file).splitlines()
    except EnvironmentError, err:
      return False, ("Error while reading the EXT parameters file at %s: %s" %
                     (parameters_file, utils.ErrnoOrStr(err)))
    parameters = [v.split(None, 1) for v in parameters]

  es_obj = \
    objects.ExtStorage(name=name, path=es_dir,
                       create_script=es_files[constants.ES_SCRIPT_CREATE],
                       remove_script=es_files[constants.ES_SCRIPT_REMOVE],
                       grow_script=es_files[constants.ES_SCRIPT_GROW],
                       attach_script=es_files[constants.ES_SCRIPT_ATTACH],
                       detach_script=es_files[constants.ES_SCRIPT_DETACH],
                       setinfo_script=es_files[constants.ES_SCRIPT_SETINFO],
                       verify_script=es_files[constants.ES_SCRIPT_VERIFY],
                       supported_parameters=parameters)
  return True, es_obj
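
# Directory layout sketch (names are illustrative): a provider "myprovider"
# installed under one of the pathutils.ES_SEARCH_PATH directories is
# expected to contain one executable file per entry in constants.ES_SCRIPTS
# (the create/remove/grow/attach/detach/setinfo/verify actions referenced
# above), plus the constants.ES_PARAMETERS_FILE listing the supported
# EXTP_* parameters, one "name description" pair per line.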


def _ExtStorageEnvironment(unique_id, ext_params,
                           size=None, grow=None, metadata=None):
  """Calculate the environment for an External Storage script.

  @type unique_id: tuple (driver, vol_name)
  @param unique_id: ExtStorage pool and name of the Volume
  @type ext_params: dict
  @param ext_params: the EXT parameters
  @type size: string
  @param size: size of the Volume (in mebibytes)
  @type grow: string
  @param grow: new size of Volume after grow (in mebibytes)
  @type metadata: string
  @param metadata: metadata info of the Volume
  @rtype: dict
  @return: dict of environment variables

  """
  vol_name = unique_id[1]

  result = {}
  result["VOL_NAME"] = vol_name

  # EXT params
  for pname, pvalue in ext_params.items():
    result["EXTP_%s" % pname.upper()] = str(pvalue)

  if size is not None:
    result["VOL_SIZE"] = size

  if grow is not None:
    result["VOL_NEW_SIZE"] = grow

  if metadata is not None:
    result["VOL_METADATA"] = metadata

  return result
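
# Illustrative result (hypothetical values): for unique_id
# ("myprovider", "vol1"), ext_params {"redundancy": "2"} and size "1024",
# the returned environment would be:
#   {"VOL_NAME": "vol1", "EXTP_REDUNDANCY": "2", "VOL_SIZE": "1024"}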


def _VolumeLogName(kind, es_name, volume):
  """Compute the ExtStorage log filename for a given Volume and operation.

  @type kind: string
  @param kind: the operation type (e.g. create, remove etc.)
  @type es_name: string
  @param es_name: the ExtStorage name
  @type volume: string
  @param volume: the name of the Volume inside the External Storage

  """
  # Check if the extstorage log dir is a valid dir
  if not os.path.isdir(pathutils.LOG_ES_DIR):
    _ThrowError("Cannot find log directory: %s", pathutils.LOG_ES_DIR)

  # TODO: Use tempfile.mkstemp to create unique filename
  base = ("%s-%s-%s-%s.log" %
          (kind, es_name, volume, utils.TimestampForFilename()))
  return utils.PathJoin(pathutils.LOG_ES_DIR, base)
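
# Example (hypothetical names, timestamp abbreviated): a create operation
# for volume "vol1" of provider "myprovider" would log to something like
#   <pathutils.LOG_ES_DIR>/create-myprovider-vol1-<timestamp>.log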


DEV_MAP = {
  constants.LD_LV: LogicalVolume,
  constants.LD_DRBD8: DRBD8,
  constants.LD_BLOCKDEV: PersistentBlockDevice,
  constants.LD_RBD: RADOSBlockDevice,
  constants.LD_EXT: ExtStorageDevice,
  }

if constants.ENABLE_FILE_STORAGE or constants.ENABLE_SHARED_FILE_STORAGE:
  DEV_MAP[constants.LD_FILE] = FileStorage
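
# DEV_MAP is the dispatch table used by FindDevice/Assemble/Create below:
# a disk's dev_type selects the BlockDev subclass to instantiate, e.g.
# DEV_MAP[constants.LD_LV] is LogicalVolume.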


def _VerifyDiskType(dev_type):
  if dev_type not in DEV_MAP:
    raise errors.ProgrammerError("Invalid block device type '%s'" % dev_type)


def _VerifyDiskParams(disk):
  """Verifies if all disk parameters are set.

  """
  missing = set(constants.DISK_LD_DEFAULTS[disk.dev_type]) - set(disk.params)
  if missing:
    raise errors.ProgrammerError("Block device is missing disk parameters: %s" %
                                 missing)


def FindDevice(disk, children):
  """Search for an existing, assembled device.

  This will succeed only if the device exists and is assembled, but it
  does not do any actions in order to activate the device.

  @type disk: L{objects.Disk}
  @param disk: the disk object to find
  @type children: list of L{bdev.BlockDev}
  @param children: the list of block devices that are children of the device
                  represented by the disk parameter

  """
  _VerifyDiskType(disk.dev_type)
  device = DEV_MAP[disk.dev_type](disk.physical_id, children, disk.size,
                                  disk.params)
  if not device.attached:
    return None
  return device


def Assemble(disk, children):
  """Try to attach or assemble an existing device.

  This will attach to or assemble the device, as needed, to bring it
  fully up. It must be safe to run on already-assembled devices.

  @type disk: L{objects.Disk}
  @param disk: the disk object to assemble
  @type children: list of L{bdev.BlockDev}
  @param children: the list of block devices that are children of the device
                  represented by the disk parameter

  """
  _VerifyDiskType(disk.dev_type)
  _VerifyDiskParams(disk)
  device = DEV_MAP[disk.dev_type](disk.physical_id, children, disk.size,
                                  disk.params)
  device.Assemble()
  return device


def Create(disk, children, excl_stor):
  """Create a device.

  @type disk: L{objects.Disk}
  @param disk: the disk object to create
  @type children: list of L{bdev.BlockDev}
  @param children: the list of block devices that are children of the device
                  represented by the disk parameter
  @type excl_stor: boolean
  @param excl_stor: Whether exclusive_storage is active

  """
  _VerifyDiskType(disk.dev_type)
  _VerifyDiskParams(disk)
  device = DEV_MAP[disk.dev_type].Create(disk.physical_id, children, disk.size,
                                         disk.params, excl_stor)
  return device
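
# Usage sketch (hypothetical disk object): given an LVM-backed objects.Disk
# with its dev_type, physical_id, size and params filled in, the three
# entry points above dispatch through DEV_MAP, e.g.:
#   device = Create(disk, [], excl_stor=False)   # provisions the device
#   device = Assemble(disk, [])                  # (re)activates it
#   device = FindDevice(disk, [])                # None if not assembled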