Statistics
| Branch: | Tag: | Revision:

root / lib / bdev.py @ 1f614561

History | View | Annotate | Download (110.9 kB)

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2010, 2011, 2012, 2013 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Block device abstraction"""
23

    
24
import re
25
import time
26
import errno
27
import shlex
28
import stat
29
import pyparsing as pyp
30
import os
31
import logging
32
import math
33

    
34
from ganeti import utils
35
from ganeti import errors
36
from ganeti import constants
37
from ganeti import objects
38
from ganeti import compat
39
from ganeti import netutils
40
from ganeti import pathutils
41
from ganeti import serializer
42

    
43

    
44
# Size of reads in _CanReadDevice
45
_DEVICE_READ_SIZE = 128 * 1024
46

    
47

    
48
class RbdShowmappedJsonError(Exception):
  """`rbd showmapped' JSON formatting error Exception class.

  """
  pass
53

    
54

    
55
def _IgnoreError(fn, *args, **kwargs):
56
  """Executes the given function, ignoring BlockDeviceErrors.
57

58
  This is used in order to simplify the execution of cleanup or
59
  rollback functions.
60

61
  @rtype: boolean
62
  @return: True when fn didn't raise an exception, False otherwise
63

64
  """
65
  try:
66
    fn(*args, **kwargs)
67
    return True
68
  except errors.BlockDeviceError, err:
69
    logging.warning("Caught BlockDeviceError but ignoring: %s", str(err))
70
    return False
71

    
72

    
73
def _ThrowError(msg, *args):
  """Log an error on the node daemon and raise a BlockDeviceError.

  @type msg: string
  @param msg: the exception text, optionally containing %-style
      placeholders filled from C{args}
  @raise errors.BlockDeviceError: always, with the interpolated message

  """
  text = msg % args if args else msg
  logging.error(text)
  raise errors.BlockDeviceError(text)
85

    
86

    
87
def _CheckResult(result):
  """Raise a BlockDeviceError if a RunCmd result indicates failure.

  @param result: result from RunCmd
  @raise errors.BlockDeviceError: if the command failed

  """
  if not result.failed:
    return
  _ThrowError("Command: %s error: %s - %s", result.cmd, result.fail_reason,
              result.output)
96

    
97

    
98
def _CanReadDevice(path):
  """Tell whether the given device is readable.

  Attempts to read the first 128k of the device; any OS-level error
  (missing node, permission, I/O failure) counts as unreadable.

  @rtype: boolean

  """
  try:
    utils.ReadFile(path, size=_DEVICE_READ_SIZE)
  except EnvironmentError:
    logging.warning("Can't read from device %s", path, exc_info=True)
    return False
  return True
110

    
111

    
112
def _GetForbiddenFileStoragePaths():
  """Builds a list of path prefixes which shouldn't be used for file storage.

  @rtype: frozenset

  """
  paths = set([
    "/boot",
    "/dev",
    "/etc",
    "/home",
    "/proc",
    "/root",
    "/sys",
    ])

  # System binary/library directories, under /, /usr and /usr/local
  for prefix in ["", "/usr", "/usr/local"]:
    for name in ["bin", "lib", "lib32", "lib64", "sbin"]:
      paths.add("%s/%s" % (prefix, name))

  return compat.UniqueFrozenset(map(os.path.normpath, paths))
133

    
134

    
135
def _ComputeWrongFileStoragePaths(paths,
                                  _forbidden=_GetForbiddenFileStoragePaths()):
  """Cross-checks a list of paths for prefixes considered bad.

  Some paths, e.g. "/bin", should not be used for file storage.

  @type paths: list
  @param paths: List of paths to be checked
  @rtype: list
  @return: Sorted list of paths for which the user should be warned

  """
  def _IsWrong(path):
    # A path is bad when it is relative, is exactly one of the
    # forbidden prefixes, or lies below one of them
    if not os.path.isabs(path):
      return True
    if path in _forbidden:
      return True
    return compat.any(utils.IsBelowDir(prefix, path)
                      for prefix in _forbidden)

  normalized = [os.path.normpath(p) for p in paths]
  return utils.NiceSort([p for p in normalized if _IsWrong(p)])
153

    
154

    
155
def ComputeWrongFileStoragePaths(_filename=pathutils.FILE_STORAGE_PATHS_FILE):
  """Returns a list of file storage paths whose prefix is considered bad.

  See L{_ComputeWrongFileStoragePaths}.

  """
  configured = _LoadAllowedFileStoragePaths(_filename)
  return _ComputeWrongFileStoragePaths(configured)
162

    
163

    
164
def _CheckFileStoragePath(path, allowed):
  """Checks if a path is in a list of allowed paths for file storage.

  @type path: string
  @param path: Path to check
  @type allowed: list
  @param allowed: List of allowed paths
  @raise errors.FileStoragePathError: If the path is not allowed

  """
  if not os.path.isabs(path):
    raise errors.FileStoragePathError("File storage path must be absolute,"
                                      " got '%s'" % path)

  found = False
  for i in allowed:
    if not os.path.isabs(i):
      # Relative entries in the allowed list are ignored, not errors
      logging.info("Ignoring relative path '%s' for file storage", i)
    elif utils.IsBelowDir(i, path):
      found = True
      break

  if not found:
    raise errors.FileStoragePathError("Path '%s' is not acceptable for file"
                                      " storage. A possible fix might be to add"
                                      " it to /etc/ganeti/file-storage-paths"
                                      " on all nodes." % path)
190

    
191

    
192
def _LoadAllowedFileStoragePaths(filename):
  """Loads file containing allowed file storage paths.

  @rtype: list
  @return: List of allowed paths (can be an empty list)

  """
  try:
    contents = utils.ReadFile(filename)
  except EnvironmentError:
    # A missing or unreadable file simply means nothing is allowed
    return []
  return utils.FilterEmptyLinesAndComments(contents)
205

    
206

    
207
def CheckFileStoragePath(path, _filename=pathutils.FILE_STORAGE_PATHS_FILE):
  """Checks if a path is allowed for file storage.

  @type path: string
  @param path: Path to check
  @raise errors.FileStoragePathError: If the path is not allowed

  """
  allowed = _LoadAllowedFileStoragePaths(_filename)

  wrong = _ComputeWrongFileStoragePaths([path])
  if wrong:
    raise errors.FileStoragePathError("Path '%s' uses a forbidden prefix" %
                                      path)

  _CheckFileStoragePath(path, allowed)
222

    
223

    
224
class BlockDev(object):
  """Block device abstract class.

  A block device can be in the following states:
    - not existing on the system, and by `Create()` it goes into:
    - existing but not setup/not active, and by `Assemble()` goes into:
    - active read-write and by `Open()` it goes into
    - online (=used, or ready for use)

  A device can also be online but read-only, however we are not using
  the readonly state (LV has it, if needed in the future) and we are
  usually looking at this like at a stack, so it's easier to
  conceptualise the transition from not-existing to online and back
  like a linear one.

  The many different states of the device are due to the fact that we
  need to cover many device types:
    - logical volumes are created, lvchange -a y $lv, and used
    - drbd devices are attached to a local disk/remote peer and made primary

  A block device is identified by three items:
    - the /dev path of the device (dynamic)
    - a unique ID of the device (static)
    - it's major/minor pair (dynamic)

  Not all devices implement both the first two as distinct items. LVM
  logical volumes have their unique ID (the pair volume group, logical
  volume name) in a 1-to-1 relation to the dev path. For DRBD devices,
  the /dev path is again dynamic and the unique id is the pair (host1,
  dev1), (host2, dev2).

  You can get to a device in two ways:
    - creating the (real) device, which returns you
      an attached instance (lvcreate)
    - attaching of a python instance to an existing (real) device

  The second point, the attachment to a device, is different
  depending on whether the device is assembled or not. At init() time,
  we search for a device with the same unique_id as us. If found,
  good. It also means that the device is already assembled. If not,
  after assembly we'll have our correct major/minor.

  """
  # pylint: disable=W0613
  def __init__(self, unique_id, children, size, params, *args):
    """Initializes the common block device state.

    @param unique_id: device type specific identifier (see class docstring)
    @param children: list of child BlockDev instances, or None
    @param size: size of the device
    @param params: dictionary of LD-level disk parameters

    """
    self._children = children
    self.dev_path = None      # /dev path, filled in by subclasses/Attach()
    self.unique_id = unique_id
    self.major = None         # kernel major number, known once attached
    self.minor = None         # kernel minor number, known once attached
    self.attached = False
    self.size = size
    self.params = params

  def Assemble(self):
    """Assemble the device from its components.

    Implementations of this method by child classes must ensure that:
      - after the device has been assembled, it knows its major/minor
        numbers; this allows other devices (usually parents) to probe
        correctly for their children
      - calling this method on an existing, in-use device is safe
      - if the device is already configured (and in an OK state),
        this method is idempotent

    """
    pass

  def Attach(self):
    """Find a device which matches our config and attach to it.

    """
    raise NotImplementedError

  def Close(self):
    """Notifies that the device will no longer be used for I/O.

    """
    raise NotImplementedError

  @classmethod
  def Create(cls, unique_id, children, size, params, excl_stor, *args):
    """Create the device.

    If the device cannot be created, it will return None
    instead. Error messages go to the logging system.

    Note that for some devices, the unique_id is used, and for other,
    the children. The idea is that these two, taken together, are
    enough for both creation and assembly (later).

    """
    raise NotImplementedError

  def Remove(self):
    """Remove this device.

    This makes sense only for some of the device types: LV and file
    storage. Also note that if the device can't attach, the removal
    can't be completed.

    """
    raise NotImplementedError

  def Rename(self, new_id):
    """Rename this device.

    This may or may not make sense for a given device type.

    """
    raise NotImplementedError

  def Open(self, force=False):
    """Make the device ready for use.

    This makes the device ready for I/O. For now, just the DRBD
    devices need this.

    The force parameter signifies that if the device has any kind of
    --force thing, it should be used, we know what we are doing.

    """
    raise NotImplementedError

  def Shutdown(self):
    """Shut down the device, freeing its children.

    This undoes the `Assemble()` work, except for the child
    assembling; as such, the children on the device are still
    assembled after this call.

    """
    raise NotImplementedError

  def SetSyncParams(self, params):
    """Adjust the synchronization parameters of the mirror.

    In case this is not a mirroring device, this is no-op.

    @param params: dictionary of LD level disk parameters related to the
    synchronization.
    @rtype: list
    @return: a list of error messages, emitted both by the current node and by
    children. An empty list means no errors.

    """
    result = []
    if self._children:
      # Propagate recursively; this base class itself has nothing to set
      for child in self._children:
        result.extend(child.SetSyncParams(params))
    return result

  def PauseResumeSync(self, pause):
    """Pause/Resume the sync of the mirror.

    In case this is not a mirroring device, this is no-op.

    @param pause: Whether to pause or resume
    @rtype: boolean
    @return: True only if all children succeeded

    """
    result = True
    if self._children:
      for child in self._children:
        result = result and child.PauseResumeSync(pause)
    return result

  def GetSyncStatus(self):
    """Returns the sync status of the device.

    If this device is a mirroring device, this function returns the
    status of the mirror.

    If sync_percent is None, it means the device is not syncing.

    If estimated_time is None, it means we can't estimate
    the time needed, otherwise it's the time left in seconds.

    If is_degraded is True, it means the device is missing
    redundancy. This is usually a sign that something went wrong in
    the device setup, if sync_percent is None.

    The ldisk parameter represents the degradation of the local
    data. This is only valid for some devices, the rest will always
    return False (not degraded).

    @rtype: objects.BlockDevStatus

    """
    # Base implementation: not syncing, not degraded
    return objects.BlockDevStatus(dev_path=self.dev_path,
                                  major=self.major,
                                  minor=self.minor,
                                  sync_percent=None,
                                  estimated_time=None,
                                  is_degraded=False,
                                  ldisk_status=constants.LDS_OKAY)

  def CombinedSyncStatus(self):
    """Calculate the mirror status recursively for our children.

    The return value is the same as for `GetSyncStatus()` except the
    minimum percent and maximum time are calculated across our
    children.

    @rtype: objects.BlockDevStatus

    """
    status = self.GetSyncStatus()

    min_percent = status.sync_percent
    max_time = status.estimated_time
    is_degraded = status.is_degraded
    ldisk_status = status.ldisk_status

    if self._children:
      for child in self._children:
        child_status = child.GetSyncStatus()

        # Worst case: lowest sync percentage across the tree
        if min_percent is None:
          min_percent = child_status.sync_percent
        elif child_status.sync_percent is not None:
          min_percent = min(min_percent, child_status.sync_percent)

        # Worst case: highest remaining-time estimate across the tree
        if max_time is None:
          max_time = child_status.estimated_time
        elif child_status.estimated_time is not None:
          max_time = max(max_time, child_status.estimated_time)

        # Degraded if any device in the tree is degraded
        is_degraded = is_degraded or child_status.is_degraded

        if ldisk_status is None:
          ldisk_status = child_status.ldisk_status
        elif child_status.ldisk_status is not None:
          ldisk_status = max(ldisk_status, child_status.ldisk_status)

    return objects.BlockDevStatus(dev_path=self.dev_path,
                                  major=self.major,
                                  minor=self.minor,
                                  sync_percent=min_percent,
                                  estimated_time=max_time,
                                  is_degraded=is_degraded,
                                  ldisk_status=ldisk_status)

  def SetInfo(self, text):
    """Update metadata with info text.

    Only supported for some device types.

    """
    for child in self._children:
      child.SetInfo(text)

  def Grow(self, amount, dryrun, backingstore):
    """Grow the block device.

    @type amount: integer
    @param amount: the amount (in mebibytes) to grow with
    @type dryrun: boolean
    @param dryrun: whether to execute the operation in simulation mode
        only, without actually increasing the size
    @param backingstore: whether to execute the operation on backing storage
        only, or on "logical" storage only; e.g. DRBD is logical storage,
        whereas LVM, file, RBD are backing storage

    """
    raise NotImplementedError

  def GetActualSize(self):
    """Return the actual disk size.

    @note: the device needs to be active when this is called
    @rtype: integer
    @return: size in bytes, as reported by "blockdev --getsize64"

    """
    assert self.attached, "BlockDevice not attached in GetActualSize()"
    result = utils.RunCmd(["blockdev", "--getsize64", self.dev_path])
    if result.failed:
      _ThrowError("blockdev failed (%s): %s",
                  result.fail_reason, result.output)
    try:
      sz = int(result.output.strip())
    except (ValueError, TypeError), err:
      _ThrowError("Failed to parse blockdev output: %s", str(err))
    return sz

  def __repr__(self):
    return ("<%s: unique_id: %s, children: %s, %s:%s, %s>" %
            (self.__class__, self.unique_id, self._children,
             self.major, self.minor, self.dev_path))
511

    
512

    
513
class LogicalVolume(BlockDev):
  """Logical Volume block device.

  """
  # Characters accepted in VG/LV names (restrictions from lvm(8),
  # see _ValidateName)
  _VALID_NAME_RE = re.compile("^[a-zA-Z0-9+_.-]*$")
  # Complete names that must be rejected
  _INVALID_NAMES = compat.UniqueFrozenset([".", "..", "snapshot", "pvmove"])
  # Substrings that must not appear anywhere in a name
  _INVALID_SUBSTRINGS = compat.UniqueFrozenset(["_mlog", "_mimage"])
520

    
521
  def __init__(self, unique_id, children, size, params, *args):
    """Attaches to a LV device.

    The unique_id is a tuple (vg_name, lv_name)

    """
    super(LogicalVolume, self).__init__(unique_id, children, size, params)
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
      raise ValueError("Invalid configuration data %s" % str(unique_id))
    self._vg_name, self._lv_name = unique_id
    self._ValidateName(self._vg_name)
    self._ValidateName(self._lv_name)
    self.dev_path = utils.PathJoin("/dev", self._vg_name, self._lv_name)
    # Assume the worst until Attach() has inspected the LV
    self._degraded = True
    self.major = None
    self.minor = None
    self.pe_size = None
    self.stripe_count = None
    self.Attach()
537

    
538
  @staticmethod
  def _GetStdPvSize(pvs_info):
    """Return the standard PV size (used with exclusive storage).

    @param pvs_info: list of objects.LvmPvInfo, cannot be empty
    @rtype: float
    @return: size in MiB

    """
    assert len(pvs_info) > 0
    smallest = min(pv.size for pv in pvs_info)
    # Leave room for partitioning margin/reserved space
    return smallest / (1 + constants.PART_MARGIN + constants.PART_RESERVED)
550

    
551
  @staticmethod
  def _ComputeNumPvs(size, pvs_info):
    """Compute the number of PVs needed for an LV (with exclusive storage).

    @type size: float
    @param size: LV size in MiB
    @param pvs_info: list of objects.LvmPvInfo, cannot be empty
    @rtype: integer
    @return: number of PVs needed
    """
    assert len(pvs_info) > 0
    std_size = float(LogicalVolume._GetStdPvSize(pvs_info))
    # Round up: a partially-used PV still counts as a whole one
    return int(math.ceil(float(size) / std_size))
564

    
565
  @staticmethod
  def _GetEmptyPvNames(pvs_info, max_pvs=None):
    """Return a list of empty PVs, by name.

    @param pvs_info: list of objects.LvmPvInfo
    @param max_pvs: optional cap on the number of names returned

    """
    empty_pvs = [pv for pv in pvs_info if objects.LvmPvInfo.IsEmpty(pv)]
    if max_pvs is not None:
      empty_pvs = empty_pvs[:max_pvs]
    return [pv.name for pv in empty_pvs]
574

    
575
  @classmethod
  def Create(cls, unique_id, children, size, params, excl_stor, *args):
    """Create a new logical volume.

    @param unique_id: tuple/list of (vg_name, lv_name)
    @param children: passed through to the LogicalVolume constructor
    @param size: requested size in MiB (used as "lvcreate -L%dm")
    @param params: LD-level disk parameters; constants.LDP_STRIPES is read
    @param excl_stor: whether exclusive storage is enabled
    @rtype: LogicalVolume
    @return: the newly created (and attached) instance
    @raise errors.BlockDeviceError: if the volume cannot be created

    """
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
      raise errors.ProgrammerError("Invalid configuration data %s" %
                                   str(unique_id))
    vg_name, lv_name = unique_id
    cls._ValidateName(vg_name)
    cls._ValidateName(lv_name)
    pvs_info = cls.GetPVInfo([vg_name])
    if not pvs_info:
      if excl_stor:
        msg = "No (empty) PVs found"
      else:
        msg = "Can't compute PV info for vg %s" % vg_name
      _ThrowError(msg)
    # Sort PVs by free space, biggest first
    pvs_info.sort(key=(lambda pv: pv.free), reverse=True)

    pvlist = [pv.name for pv in pvs_info]
    # ":" is the PV-list separator on the lvcreate command line
    if compat.any(":" in v for v in pvlist):
      _ThrowError("Some of your PVs have the invalid character ':' in their"
                  " name, this is not supported - please filter them out"
                  " in lvm.conf using either 'filter' or 'preferred_names'")

    current_pvs = len(pvlist)
    desired_stripes = params[constants.LDP_STRIPES]
    # Can't stripe wider than the number of PVs available
    stripes = min(current_pvs, desired_stripes)

    if excl_stor:
      (err_msgs, _) = utils.LvmExclusiveCheckNodePvs(pvs_info)
      if err_msgs:
        for m in err_msgs:
          logging.warning(m)
      # In exclusive-storage mode only whole, empty PVs may be used
      req_pvs = cls._ComputeNumPvs(size, pvs_info)
      pvlist = cls._GetEmptyPvNames(pvs_info, req_pvs)
      current_pvs = len(pvlist)
      if current_pvs < req_pvs:
        _ThrowError("Not enough empty PVs to create a disk of %d MB:"
                    " %d available, %d needed", size, current_pvs, req_pvs)
      assert current_pvs == len(pvlist)
      if stripes > current_pvs:
        # No warning issued for this, as it's no surprise
        stripes = current_pvs

    else:
      if stripes < desired_stripes:
        logging.warning("Could not use %d stripes for VG %s, as only %d PVs are"
                        " available.", desired_stripes, vg_name, current_pvs)
      free_size = sum([pv.free for pv in pvs_info])
      # The size constraint should have been checked from the master before
      # calling the create function.
      if free_size < size:
        _ThrowError("Not enough free space: required %s,"
                    " available %s", size, free_size)

    # If the free space is not well distributed, we won't be able to
    # create an optimally-striped volume; in that case, we want to try
    # with N, N-1, ..., 2, and finally 1 (non-stripped) number of
    # stripes
    cmd = ["lvcreate", "-L%dm" % size, "-n%s" % lv_name]
    for stripes_arg in range(stripes, 0, -1):
      result = utils.RunCmd(cmd + ["-i%d" % stripes_arg] + [vg_name] + pvlist)
      if not result.failed:
        break
    if result.failed:
      _ThrowError("LV create failed (%s): %s",
                  result.fail_reason, result.output)
    return LogicalVolume(unique_id, children, size, params)
645

    
646
  @staticmethod
  def _GetVolumeInfo(lvm_cmd, fields):
    """Returns LVM Volume infos using lvm_cmd

    @param lvm_cmd: Should be one of "pvs", "vgs" or "lvs"
    @param fields: Fields to return
    @return: A list of lists, one inner list per volume, each holding the
        requested fields as strings (note: not dicts, the caller must know
        the field order it requested)
    @raise errors.ProgrammerError: if no fields were requested
    @raise errors.CommandError: if the command fails or a line cannot be
        split into exactly len(fields) values

    """
    if not fields:
      raise errors.ProgrammerError("No fields specified")

    sep = "|"
    cmd = [lvm_cmd, "--noheadings", "--nosuffix", "--units=m", "--unbuffered",
           "--separator=%s" % sep, "-o%s" % ",".join(fields)]

    result = utils.RunCmd(cmd)
    if result.failed:
      raise errors.CommandError("Can't get the volume information: %s - %s" %
                                (result.fail_reason, result.output))

    num_fields = len(fields)
    data = []
    for line in result.stdout.splitlines():
      splitted_fields = line.strip().split(sep)

      # Every output line must yield exactly the requested fields
      if num_fields != len(splitted_fields):
        raise errors.CommandError("Can't parse %s output: line '%s'" %
                                  (lvm_cmd, line))

      data.append(splitted_fields)

    return data
678

    
679
  @classmethod
  def GetPVInfo(cls, vg_names, filter_allocatable=True, include_lvs=False):
    """Get the free space info for PVs in a volume group.

    @param vg_names: list of volume group names, if empty all will be returned
    @param filter_allocatable: whether to skip over unallocatable PVs
    @param include_lvs: whether to include a list of LVs hosted on each PV

    @rtype: list
    @return: list of objects.LvmPvInfo objects, or None if the PV
        information could not be retrieved

    """
    # We request "lv_name" field only if we care about LVs, so we don't get
    # a long list of entries with many duplicates unless we really have to.
    # The duplicate "pv_name" field will be ignored.
    if include_lvs:
      lvfield = "lv_name"
    else:
      lvfield = "pv_name"
    try:
      info = cls._GetVolumeInfo("pvs", ["pv_name", "vg_name", "pv_free",
                                        "pv_attr", "pv_size", lvfield])
    except errors.GenericError, err:
      logging.error("Can't get PV information: %s", err)
      return None

    # When asked for LVs, "pvs" may return multiple entries for the same PV-LV
    # pair. We sort entries by PV name and then LV name, so it's easy to weed
    # out duplicates.
    if include_lvs:
      info.sort(key=(lambda i: (i[0], i[5])))
    data = []
    lastpvi = None
    for (pv_name, vg_name, pv_free, pv_attr, pv_size, lv_name) in info:
      # (possibly) skip over pvs which are not allocatable
      if filter_allocatable and pv_attr[0] != "a":
        continue
      # (possibly) skip over pvs which are not in the right volume group(s)
      if vg_names and vg_name not in vg_names:
        continue
      # Beware of duplicates (check before inserting)
      if lastpvi and lastpvi.name == pv_name:
        # Same PV as the previous entry: only collect the extra LV name;
        # thanks to the sort above, duplicate LVs are adjacent
        if include_lvs and lv_name:
          if not lastpvi.lv_list or lastpvi.lv_list[-1] != lv_name:
            lastpvi.lv_list.append(lv_name)
      else:
        if include_lvs and lv_name:
          lvl = [lv_name]
        else:
          lvl = []
        lastpvi = objects.LvmPvInfo(name=pv_name, vg_name=vg_name,
                                    size=float(pv_size), free=float(pv_free),
                                    attributes=pv_attr, lv_list=lvl)
        data.append(lastpvi)

    return data
735

    
736
  @classmethod
  def _GetExclusiveStorageVgFree(cls, vg_name):
    """Return the free disk space in the given VG, in exclusive storage mode.

    @type vg_name: string
    @param vg_name: VG name
    @rtype: float
    @return: free space in MiB
    """
    pvs_info = cls.GetPVInfo([vg_name])
    if not pvs_info:
      return 0.0
    # Only completely empty PVs count, each contributing one "standard"
    # PV worth of space
    empty_count = len(cls._GetEmptyPvNames(pvs_info))
    return cls._GetStdPvSize(pvs_info) * empty_count
751

    
752
  @classmethod
753
  def GetVGInfo(cls, vg_names, excl_stor, filter_readonly=True):
754
    """Get the free space info for specific VGs.
755

756
    @param vg_names: list of volume group names, if empty all will be returned
757
    @param excl_stor: whether exclusive_storage is enabled
758
    @param filter_readonly: whether to skip over readonly VGs
759

760
    @rtype: list
761
    @return: list of tuples (free_space, total_size, name) with free_space in
762
             MiB
763

764
    """
765
    try:
766
      info = cls._GetVolumeInfo("vgs", ["vg_name", "vg_free", "vg_attr",
767
                                        "vg_size"])
768
    except errors.GenericError, err:
769
      logging.error("Can't get VG information: %s", err)
770
      return None
771

    
772
    data = []
773
    for vg_name, vg_free, vg_attr, vg_size in info:
774
      # (possibly) skip over vgs which are not writable
775
      if filter_readonly and vg_attr[0] == "r":
776
        continue
777
      # (possibly) skip over vgs which are not in the right volume group(s)
778
      if vg_names and vg_name not in vg_names:
779
        continue
780
      # Exclusive storage needs a different concept of free space
781
      if excl_stor:
782
        es_free = cls._GetExclusiveStorageVgFree(vg_name)
783
        assert es_free <= vg_free
784
        vg_free = es_free
785
      data.append((float(vg_free), float(vg_size), vg_name))
786

    
787
    return data
788

    
789
  @classmethod
  def _ValidateName(cls, name):
    """Validates that a given name is valid as VG or LV name.

    The list of valid characters and restricted names is taken out of
    the lvm(8) manpage, with the simplification that we enforce both
    VG and LV restrictions on the names.

    """
    valid = (cls._VALID_NAME_RE.match(name) and
             name not in cls._INVALID_NAMES and
             not compat.any(substring in name
                            for substring in cls._INVALID_SUBSTRINGS))
    if not valid:
      _ThrowError("Invalid LVM name '%s'", name)
802

    
803
  def Remove(self):
    """Remove this logical volume.

    Silently returns if the LV cannot be found (nothing to remove).

    """
    if not self.minor and not self.Attach():
      # the LV does not exist
      return
    lv_spec = "%s/%s" % (self._vg_name, self._lv_name)
    result = utils.RunCmd(["lvremove", "-f", lv_spec])
    if result.failed:
      _ThrowError("Can't lvremove: %s - %s", result.fail_reason, result.output)
814

    
815
  def Rename(self, new_id):
    """Rename this logical volume.

    @param new_id: tuple/list of (vg_name, lv_name); the volume group
        must be the current one, as LVs cannot be moved across VGs here
    @raise errors.ProgrammerError: if new_id is malformed or names a
        different volume group
    @raise errors.BlockDeviceError: if lvrename fails

    """
    if not isinstance(new_id, (tuple, list)) or len(new_id) != 2:
      raise errors.ProgrammerError("Invalid new logical id '%s'" % new_id)
    new_vg, new_name = new_id
    if new_vg != self._vg_name:
      # Fixed duplicated word ("to to") in the original error message
      raise errors.ProgrammerError("Can't move a logical volume across"
                                   " volume groups (from %s to %s)" %
                                   (self._vg_name, new_vg))
    result = utils.RunCmd(["lvrename", new_vg, self._lv_name, new_name])
    if result.failed:
      _ThrowError("Failed to rename the logical volume: %s", result.output)
    self._lv_name = new_name
    self.dev_path = utils.PathJoin("/dev", self._vg_name, self._lv_name)
831

    
832
  def Attach(self):
    """Attach to an existing LV.

    This method will try to see if an existing and active LV exists
    which matches our name. If so, its major/minor will be
    recorded.

    @rtype: boolean
    @return: True if the LV was found and its parameters recorded

    """
    self.attached = False
    result = utils.RunCmd(["lvs", "--noheadings", "--separator=,",
                           "--units=k", "--nosuffix",
                           "-olv_attr,lv_kernel_major,lv_kernel_minor,"
                           "vg_extent_size,stripes", self.dev_path])
    if result.failed:
      logging.error("Can't find LV %s: %s, %s",
                    self.dev_path, result.fail_reason, result.output)
      return False
    # the output can (and will) have multiple lines for multi-segment
    # LVs, as the 'stripes' parameter is a segment one, so we take
    # only the last entry, which is the one we're interested in; note
    # that with LVM2 anyway the 'stripes' value must be constant
    # across segments, so this is a no-op actually
    out = result.stdout.splitlines()
    if not out: # totally empty result? splitlines() returns at least
                # one line for any non-empty string
      logging.error("Can't parse LVS output, no lines? Got '%s'", str(out))
      return False
    out = out[-1].strip().rstrip(",")
    out = out.split(",")
    if len(out) != 5:
      logging.error("Can't parse LVS output, len(%s) != 5", str(out))
      return False

    status, major, minor, pe_size, stripes = out
    if len(status) < 6:
      logging.error("lvs lv_attr is not at least 6 characters (%s)", status)
      return False

    try:
      major = int(major)
      minor = int(minor)
    except (TypeError, ValueError), err:
      # NOTE(review): unlike the parse failures below, this one does not
      # return False, so major/minor may stay as strings — presumably so
      # an inactive LV (no kernel numbers yet) can still attach; confirm
      # before changing
      logging.error("lvs major/minor cannot be parsed: %s", str(err))

    try:
      pe_size = int(float(pe_size))
    except (TypeError, ValueError), err:
      logging.error("Can't parse vg extent size: %s", err)
      return False

    try:
      stripes = int(stripes)
    except (TypeError, ValueError), err:
      logging.error("Can't parse the number of stripes: %s", err)
      return False

    self.major = major
    self.minor = minor
    self.pe_size = pe_size
    self.stripe_count = stripes
    self._degraded = status[0] == "v" # virtual volume, i.e. doesn't have
                                      # backing storage
    self.attached = True
    return True
896

    
897
  def Assemble(self):
    """Assemble the device.

    The LV is always (re)activated via `lvchange -ay` before use;
    this guards against cases where the volume group was not active
    after boot (and possibly after disk problems as well).

    """
    activate_cmd = ["lvchange", "-ay", self.dev_path]
    result = utils.RunCmd(activate_cmd)
    if result.failed:
      _ThrowError("Can't activate lv %s: %s", self.dev_path, result.output)
908

    
909
  def Shutdown(self):
    """Shutdown the device.

    Intentionally a no-op: logical volumes are not deactivated at
    shutdown time.

    """
917

    
918
  def GetSyncStatus(self):
    """Returns the sync status of the device.

    Logical volumes are not mirroring devices, so sync_percent and
    estimated_time are always None (no recovery can be in progress).
    The is_degraded value mirrors the ldisk state.

    For ldisk we check whether the volume carries the 'virtual' type
    flag, meaning it lost its backing storage (reads return I/O
    errors); this happens after a physical disk failure followed by
    'vgreduce --removemissing' on the volume group.

    The state itself was already gathered by Attach(), so this method
    only repackages it.

    @rtype: objects.BlockDevStatus

    """
    ldisk_status = (constants.LDS_FAULTY if self._degraded
                    else constants.LDS_OKAY)

    return objects.BlockDevStatus(dev_path=self.dev_path,
                                  major=self.major,
                                  minor=self.minor,
                                  sync_percent=None,
                                  estimated_time=None,
                                  is_degraded=self._degraded,
                                  ldisk_status=ldisk_status)
952

    
953
  def Open(self, force=False):
    """Make the device ready for I/O.

    Nothing needs to be done for logical volumes, so this is a no-op.

    """
960

    
961
  def Close(self):
    """Notifies that the device will no longer be used for I/O.

    Nothing needs to be done for logical volumes, so this is a no-op.

    """
968

    
969
  def Snapshot(self, size):
    """Create a snapshot copy of an lvm block device.

    @param size: size of the snapshot volume, in MiB
    @returns: tuple (vg, lv)

    """
    snapshot_name = self._lv_name + ".snap"

    # remove existing snapshot if found
    old_snap = LogicalVolume((self._vg_name, snapshot_name), None, size,
                             self.params)
    _IgnoreError(old_snap.Remove)

    # verify there is enough free space in the VG before creating
    vg_info = self.GetVGInfo([self._vg_name], False)
    if not vg_info:
      _ThrowError("Can't compute VG info for vg %s", self._vg_name)
    free_size, _, _ = vg_info[0]
    if free_size < size:
      _ThrowError("Not enough free space: required %s,"
                  " available %s", size, free_size)

    _CheckResult(utils.RunCmd(["lvcreate", "-L%dm" % size, "-s",
                               "-n%s" % snapshot_name, self.dev_path]))

    return (self._vg_name, snapshot_name)
993

    
994
  def _RemoveOldInfo(self):
    """Try to remove old tags from the lv.

    """
    result = utils.RunCmd(["lvs", "-o", "tags", "--noheadings", "--nosuffix",
                           self.dev_path])
    _CheckResult(result)

    current_tags = result.stdout.strip()
    if not current_tags:
      return
    # drop every existing tag, one lvchange call per tag
    for tag in current_tags.split(","):
      _CheckResult(utils.RunCmd(["lvchange", "--deltag",
                                 tag.strip(), self.dev_path]))
1007

    
1008
  def SetInfo(self, text):
    """Update metadata with info text.

    The text is stored as an LVM tag on the volume, after replacing
    characters LVM does not accept and truncating to the maximum tag
    length.

    """
    BlockDev.SetInfo(self, text)

    self._RemoveOldInfo()

    # Replace invalid characters: the first character has a stricter
    # alphabet (no leading '-'), the rest may also contain '-'
    tag = re.sub("^[^A-Za-z0-9_+.]", "_", text)
    tag = re.sub("[^-A-Za-z0-9_+.]", "_", tag)

    # Only up to 128 characters are allowed
    tag = tag[:128]

    _CheckResult(utils.RunCmd(["lvchange", "--addtag", tag, self.dev_path]))
1024

    
1025
  def Grow(self, amount, dryrun, backingstore):
    """Grow the logical volume.

    @param amount: how much to grow by, in MiB
    @param dryrun: if True, only simulate the operation (lvextend --test)
    @param backingstore: LVs are backing storage, so growth is only
        performed when this is True

    """
    if not backingstore:
      return
    if self.pe_size is None or self.stripe_count is None:
      if not self.Attach():
        _ThrowError("Can't attach to LV during Grow()")
    full_stripe_size = self.pe_size * self.stripe_count
    # pe_size is in KB, so convert the MiB amount and round it up to a
    # whole number of stripes
    grow_kb = amount * 1024
    remainder = grow_kb % full_stripe_size
    if remainder:
      grow_kb += full_stripe_size - remainder
    lvextend_cmd = ["lvextend", "-L", "+%dk" % grow_kb]
    if dryrun:
      lvextend_cmd.append("--test")
    # we try multiple algorithms since the 'best' ones might not have
    # space available in the right place, but later ones might (since
    # they have less constraints); also note that only recent LVM
    # supports 'cling'
    for alloc_policy in ("contiguous", "cling", "normal"):
      result = utils.RunCmd(lvextend_cmd +
                            ["--alloc", alloc_policy, self.dev_path])
      if not result.failed:
        return
    _ThrowError("Can't grow LV %s: %s", self.dev_path, result.output)
1052

    
1053

    
1054
class DRBD8Status(object): # pylint: disable=R0902
  """A DRBD status representation class.

  Parses one (joined) line of /proc/drbd for a single minor and
  exposes the connection state, roles, disk states and resync
  progress as attributes.

  Note that this doesn't support unconfigured devices (cs:Unconfigured).

  """
  # matches an unconfigured minor line
  UNCONF_RE = re.compile(r"\s*[0-9]+:\s*cs:Unconfigured$")
  # matches a configured line; groups: cstate, local role, remote role,
  # local disk state, remote disk state ("st:" is the pre-8.3 spelling
  # of "ro:")
  LINE_RE = re.compile(r"\s*[0-9]+:\s*cs:(\S+)\s+(?:st|ro):([^/]+)/(\S+)"
                       "\s+ds:([^/]+)/(\S+)\s+.*$")
  # matches resync progress; groups: percent, hours, minutes, seconds
  SYNC_RE = re.compile(r"^.*\ssync'ed:\s*([0-9.]+)%.*"
                       # Due to a bug in drbd in the kernel, introduced in
                       # commit 4b0715f096 (still unfixed as of 2011-08-22)
                       "(?:\s|M)"
                       "finish: ([0-9]+):([0-9]+):([0-9]+)\s.*$")

  # connection state (cs:) values
  CS_UNCONFIGURED = "Unconfigured"
  CS_STANDALONE = "StandAlone"
  CS_WFCONNECTION = "WFConnection"
  CS_WFREPORTPARAMS = "WFReportParams"
  CS_CONNECTED = "Connected"
  CS_STARTINGSYNCS = "StartingSyncS"
  CS_STARTINGSYNCT = "StartingSyncT"
  CS_WFBITMAPS = "WFBitMapS"
  CS_WFBITMAPT = "WFBitMapT"
  CS_WFSYNCUUID = "WFSyncUUID"
  CS_SYNCSOURCE = "SyncSource"
  CS_SYNCTARGET = "SyncTarget"
  CS_PAUSEDSYNCS = "PausedSyncS"
  CS_PAUSEDSYNCT = "PausedSyncT"
  # connection states that count as "a resync is in progress"
  CSET_SYNC = compat.UniqueFrozenset([
    CS_WFREPORTPARAMS,
    CS_STARTINGSYNCS,
    CS_STARTINGSYNCT,
    CS_WFBITMAPS,
    CS_WFBITMAPT,
    CS_WFSYNCUUID,
    CS_SYNCSOURCE,
    CS_SYNCTARGET,
    CS_PAUSEDSYNCS,
    CS_PAUSEDSYNCT,
    ])

  # disk state (ds:) values
  DS_DISKLESS = "Diskless"
  DS_ATTACHING = "Attaching" # transient state
  DS_FAILED = "Failed" # transient state, next: diskless
  DS_NEGOTIATING = "Negotiating" # transient state
  DS_INCONSISTENT = "Inconsistent" # while syncing or after creation
  DS_OUTDATED = "Outdated"
  DS_DUNKNOWN = "DUnknown" # shown for peer disk when not connected
  DS_CONSISTENT = "Consistent"
  DS_UPTODATE = "UpToDate" # normal state

  # role (ro:) values
  RO_PRIMARY = "Primary"
  RO_SECONDARY = "Secondary"
  RO_UNKNOWN = "Unknown"

  def __init__(self, procline):
    """Parse a /proc/drbd status line.

    @param procline: one (joined) line of /proc/drbd for a minor
    @raise errors.BlockDeviceError: if the line matches neither the
        configured nor the unconfigured pattern

    """
    u = self.UNCONF_RE.match(procline)
    if u:
      self.cstatus = self.CS_UNCONFIGURED
      self.lrole = self.rrole = self.ldisk = self.rdisk = None
    else:
      m = self.LINE_RE.match(procline)
      if not m:
        raise errors.BlockDeviceError("Can't parse input data '%s'" % procline)
      self.cstatus = m.group(1)
      self.lrole = m.group(2)
      self.rrole = m.group(3)
      self.ldisk = m.group(4)
      self.rdisk = m.group(5)

    # end reading of data from the LINE_RE or UNCONF_RE

    # derive convenience booleans from the raw states above
    self.is_standalone = self.cstatus == self.CS_STANDALONE
    self.is_wfconn = self.cstatus == self.CS_WFCONNECTION
    self.is_connected = self.cstatus == self.CS_CONNECTED
    self.is_primary = self.lrole == self.RO_PRIMARY
    self.is_secondary = self.lrole == self.RO_SECONDARY
    self.peer_primary = self.rrole == self.RO_PRIMARY
    self.peer_secondary = self.rrole == self.RO_SECONDARY
    self.both_primary = self.is_primary and self.peer_primary
    self.both_secondary = self.is_secondary and self.peer_secondary

    self.is_diskless = self.ldisk == self.DS_DISKLESS
    self.is_disk_uptodate = self.ldisk == self.DS_UPTODATE
    self.peer_disk_uptodate = self.rdisk == self.DS_UPTODATE

    self.is_in_resync = self.cstatus in self.CSET_SYNC
    self.is_in_use = self.cstatus != self.CS_UNCONFIGURED

    # resync progress, if present on the line
    m = self.SYNC_RE.match(procline)
    if m:
      self.sync_percent = float(m.group(1))
      hours = int(m.group(2))
      minutes = int(m.group(3))
      seconds = int(m.group(4))
      self.est_time = hours * 3600 + minutes * 60 + seconds
    else:
      # we have (in this if branch) no percent information, but if
      # we're resyncing we need to 'fake' a sync percent information,
      # as this is how cmdlib determines if it makes sense to wait for
      # resyncing or not
      if self.is_in_resync:
        self.sync_percent = 0
      else:
        self.sync_percent = None
      self.est_time = None
1161

    
1162

    
1163
class BaseDRBD(BlockDev): # pylint: disable=W0223
  """Base DRBD class.

  This class contains a few bits of common functionality between the
  0.7 and 8.x versions of DRBD.

  """
  # parses the "version:" header of /proc/drbd; groups: kernel
  # major/minor/point, api, proto, optional second proto
  _VERSION_RE = re.compile(r"^version: (\d+)\.(\d+)\.(\d+)(?:\.\d+)?"
                           r" \(api:(\d+)/proto:(\d+)(?:-(\d+))?\)")
  _VALID_LINE_RE = re.compile("^ *([0-9]+): cs:([^ ]+).*$")
  _UNUSED_LINE_RE = re.compile("^ *([0-9]+): cs:Unconfigured$")

  # major number the drbd driver registers its devices under
  _DRBD_MAJOR = 147
  _ST_UNCONFIGURED = "Unconfigured"
  _ST_WFCONNECTION = "WFConnection"
  _ST_CONNECTED = "Connected"

  _STATUS_FILE = constants.DRBD_STATUS_FILE
  _USERMODE_HELPER_FILE = "/sys/module/drbd/parameters/usermode_helper"

  @staticmethod
  def _GetProcData(filename=_STATUS_FILE):
    """Return data from /proc/drbd.

    @return: the file contents as a list of lines
    @raise errors.BlockDeviceError: (via _ThrowError) if the file is
        missing (module not loaded), unreadable or empty

    """
    try:
      data = utils.ReadFile(filename).splitlines()
    except EnvironmentError, err:
      if err.errno == errno.ENOENT:
        _ThrowError("The file %s cannot be opened, check if the module"
                    " is loaded (%s)", filename, str(err))
      else:
        _ThrowError("Can't read the DRBD proc file %s: %s", filename, str(err))
    if not data:
      _ThrowError("Can't read any data from %s", filename)
    return data

  @classmethod
  def _MassageProcData(cls, data):
    """Transform the output of _GetProcData into a nicer form.

    Continuation lines (which don't match _VALID_LINE_RE) are folded
    into the preceding minor's line.

    @return: a dictionary of minor: joined lines from /proc/drbd
        for that minor

    """
    results = {}
    old_minor = old_line = None
    for line in data:
      if not line: # completely empty lines, as can be returned by drbd8.0+
        continue
      lresult = cls._VALID_LINE_RE.match(line)
      if lresult is not None:
        # a new minor starts: flush the previous one first
        if old_minor is not None:
          results[old_minor] = old_line
        old_minor = int(lresult.group(1))
        old_line = line
      else:
        # continuation line, append to the current minor
        if old_minor is not None:
          old_line += " " + line.strip()
    # add last line
    if old_minor is not None:
      results[old_minor] = old_line
    return results

  @classmethod
  def _GetVersion(cls, proc_data):
    """Return the DRBD version.

    This will return a dict with keys:
      - k_major
      - k_minor
      - k_point
      - api
      - proto
      - proto2 (only on drbd > 8.2.X)

    @raise errors.BlockDeviceError: if the version header cannot be
        parsed

    """
    first_line = proc_data[0].strip()
    version = cls._VERSION_RE.match(first_line)
    if not version:
      raise errors.BlockDeviceError("Can't parse DRBD version from '%s'" %
                                    first_line)

    values = version.groups()
    retval = {
      "k_major": int(values[0]),
      "k_minor": int(values[1]),
      "k_point": int(values[2]),
      "api": int(values[3]),
      "proto": int(values[4]),
      }
    if values[5] is not None:
      retval["proto2"] = values[5]

    return retval

  @staticmethod
  def GetUsermodeHelper(filename=_USERMODE_HELPER_FILE):
    """Returns DRBD usermode_helper currently set.

    @return: the first line of the sysfs helper file

    """
    try:
      helper = utils.ReadFile(filename).splitlines()[0]
    except EnvironmentError, err:
      if err.errno == errno.ENOENT:
        _ThrowError("The file %s cannot be opened, check if the module"
                    " is loaded (%s)", filename, str(err))
      else:
        _ThrowError("Can't read DRBD helper file %s: %s", filename, str(err))
    if not helper:
      _ThrowError("Can't read any data from %s", filename)
    return helper

  @staticmethod
  def _DevPath(minor):
    """Return the path to a drbd device for a given minor.

    """
    return "/dev/drbd%d" % minor

  @classmethod
  def GetUsedDevs(cls):
    """Compute the list of used DRBD devices.

    @return: a dict of minor: (state, raw /proc line), excluding
        unconfigured minors

    """
    data = cls._GetProcData()

    used_devs = {}
    for line in data:
      match = cls._VALID_LINE_RE.match(line)
      if not match:
        continue
      minor = int(match.group(1))
      state = match.group(2)
      if state == cls._ST_UNCONFIGURED:
        continue
      used_devs[minor] = state, line

    return used_devs

  def _SetFromMinor(self, minor):
    """Set our parameters based on the given minor.

    This sets our minor variable and our dev_path.

    @param minor: the minor to attach to, or None to mark the device
        as detached

    """
    if minor is None:
      self.minor = self.dev_path = None
      self.attached = False
    else:
      self.minor = minor
      self.dev_path = self._DevPath(minor)
      self.attached = True

  @staticmethod
  def _CheckMetaSize(meta_device):
    """Check if the given meta device looks like a valid one.

    This currently only checks the size, which must be around
    128MiB.

    @raise errors.BlockDeviceError: (via _ThrowError) if the size
        cannot be read, is below 128MiB or above 1GiB

    """
    result = utils.RunCmd(["blockdev", "--getsize", meta_device])
    if result.failed:
      _ThrowError("Failed to get device size: %s - %s",
                  result.fail_reason, result.output)
    try:
      sectors = int(result.stdout)
    except (TypeError, ValueError):
      _ThrowError("Invalid output from blockdev: '%s'", result.stdout)
    # blockdev --getsize reports 512-byte sectors
    num_bytes = sectors * 512
    if num_bytes < 128 * 1024 * 1024: # less than 128MiB
      _ThrowError("Meta device too small (%.2fMib)", (num_bytes / 1024 / 1024))
    # the maximum *valid* size of the meta device when living on top
    # of LVM is hard to compute: it depends on the number of stripes
    # and the PE size; e.g. a 2-stripe, 64MB PE will result in a 128MB
    # (normal size), but an eight-stripe 128MB PE will result in a 1GB
    # size meta device; as such, we restrict it to 1GB (a little bit
    # too generous, but making assumptions about PE size is hard)
    if num_bytes > 1024 * 1024 * 1024:
      _ThrowError("Meta device too big (%.2fMiB)", (num_bytes / 1024 / 1024))

  def Rename(self, new_id):
    """Rename a device.

    This is not supported for drbd devices.

    @raise errors.ProgrammerError: always

    """
    raise errors.ProgrammerError("Can't rename a drbd device")
1352

    
1353

    
1354
class DRBD8(BaseDRBD):
  """DRBD v8.x block device.

  This implements the local host part of the DRBD device, i.e. it
  doesn't do anything to the supposed peer. If you need a fully
  connected DRBD pair, you need to use this class on both hosts.

  The unique_id for the drbd device is a (local_ip, local_port,
  remote_ip, remote_port, local_minor, secret) tuple, and it must have
  two children: the data device and the meta_device. The meta device
  is checked for valid size and is zeroed on create.

  """
  # highest minor number we will ever allocate
  _MAX_MINORS = 255
  # lazily-built pyparsing grammar for `drbdsetup show` output; filled
  # in by _GetShowParser on first use
  _PARSE_SHOW = None

  # timeout constants
  _NET_RECONFIG_TIMEOUT = 60

  # command line options for barriers
  _DISABLE_DISK_OPTION = "--no-disk-barrier"  # -a
  _DISABLE_DRAIN_OPTION = "--no-disk-drain"   # -D
  _DISABLE_FLUSH_OPTION = "--no-disk-flushes" # -i
  _DISABLE_META_FLUSH_OPTION = "--no-md-flushes"  # -m
1378

    
1379
  def __init__(self, unique_id, children, size, params, *args):
    """Initialize a DRBD8 device.

    @param unique_id: (local_ip, local_port, remote_ip, remote_port,
        local_minor, secret) tuple
    @param children: either empty or a [data_dev, meta_dev] pair; a
        list containing any None entry is treated as empty
    @raise ValueError: on malformed configuration data

    """
    # *args is accepted but unused here; presumably kept for signature
    # compatibility with sibling device classes
    if children and children.count(None) > 0:
      children = []
    if len(children) not in (0, 2):
      raise ValueError("Invalid configuration data %s" % str(children))
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 6:
      raise ValueError("Invalid configuration data %s" % str(unique_id))
    (self._lhost, self._lport,
     self._rhost, self._rport,
     self._aminor, self._secret) = unique_id
    if children:
      # drop the children entirely if the meta device can't be read,
      # so assembly proceeds without local storage
      if not _CanReadDevice(children[1].dev_path):
        logging.info("drbd%s: Ignoring unreadable meta device", self._aminor)
        children = []
    super(DRBD8, self).__init__(unique_id, children, size, params)
    self.major = self._DRBD_MAJOR
    # refuse to run against a non-8.x kernel module
    version = self._GetVersion(self._GetProcData())
    if version["k_major"] != 8:
      _ThrowError("Mismatch in DRBD kernel version and requested ganeti"
                  " usage: kernel is %s.%s, ganeti wants 8.x",
                  version["k_major"], version["k_minor"])

    if (self._lhost is not None and self._lhost == self._rhost and
        self._lport == self._rport):
      raise ValueError("Invalid configuration data, same local/remote %s" %
                       (unique_id,))
    self.Attach()
1406

    
1407
  @classmethod
  def _InitMeta(cls, minor, dev_path):
    """Initialize a meta device.

    This will not work if the given minor is in use.

    """
    # Zero the metadata first, in order to make sure drbdmeta doesn't
    # try to auto-detect existing filesystems or similar (see
    # http://code.google.com/p/ganeti/issues/detail?id=182); we only
    # care about the first 128MB of data in the device, even though it
    # can be bigger
    wipe_cmd = [constants.DD_CMD,
                "if=/dev/zero", "of=%s" % dev_path,
                "bs=1048576", "count=128", "oflag=direct"]
    result = utils.RunCmd(wipe_cmd)
    if result.failed:
      _ThrowError("Can't wipe the meta device: %s", result.output)

    create_cmd = ["drbdmeta", "--force", cls._DevPath(minor),
                  "v08", dev_path, "0", "create-md"]
    result = utils.RunCmd(create_cmd)
    if result.failed:
      _ThrowError("Can't initialize meta device: %s", result.output)
1429

    
1430
  @classmethod
  def _FindUnusedMinor(cls):
    """Find an unused DRBD device.

    This is specific to 8.x as the minors are allocated dynamically,
    so non-existing numbers up to a max minor count are actually free.

    @return: an unconfigured minor if one exists, otherwise one past
        the highest configured minor
    @raise errors.BlockDeviceError: if all minors are in use

    """
    highest = None
    for line in cls._GetProcData():
      unused_match = cls._UNUSED_LINE_RE.match(line)
      if unused_match:
        # an already-allocated but unconfigured minor can be reused
        return int(unused_match.group(1))
      valid_match = cls._VALID_LINE_RE.match(line)
      if valid_match:
        minor = int(valid_match.group(1))
        if highest is None or minor > highest:
          highest = minor
    if highest is None: # there are no minors in use at all
      return 0
    if highest >= cls._MAX_MINORS:
      logging.error("Error: no free drbd minors!")
      raise errors.BlockDeviceError("Can't find a free DRBD minor")
    return highest + 1
1455

    
1456
  @classmethod
  def _GetShowParser(cls):
    """Return a parser for `drbd show` output.

    This will either create or return an already-created parser for the
    output of the command `drbd show`.

    @return: a pyparsing grammar object, cached in cls._PARSE_SHOW

    """
    if cls._PARSE_SHOW is not None:
      return cls._PARSE_SHOW

    # pyparsing setup
    lbrace = pyp.Literal("{").suppress()
    rbrace = pyp.Literal("}").suppress()
    lbracket = pyp.Literal("[").suppress()
    rbracket = pyp.Literal("]").suppress()
    semi = pyp.Literal(";").suppress()
    colon = pyp.Literal(":").suppress()
    # this also converts the value to an int
    number = pyp.Word(pyp.nums).setParseAction(lambda s, l, t: int(t[0]))

    comment = pyp.Literal("#") + pyp.Optional(pyp.restOfLine)
    defa = pyp.Literal("_is_default").suppress()
    dbl_quote = pyp.Literal('"').suppress()

    keyword = pyp.Word(pyp.alphanums + "-")

    # value types
    value = pyp.Word(pyp.alphanums + "_-/.:")
    quoted = dbl_quote + pyp.CharsNotIn('"') + dbl_quote
    ipv4_addr = (pyp.Optional(pyp.Literal("ipv4")).suppress() +
                 pyp.Word(pyp.nums + ".") + colon + number)
    ipv6_addr = (pyp.Optional(pyp.Literal("ipv6")).suppress() +
                 pyp.Optional(lbracket) + pyp.Word(pyp.hexnums + ":") +
                 pyp.Optional(rbracket) + colon + number)
    # meta device, extended syntax
    meta_value = ((value ^ quoted) + lbracket + number + rbracket)
    # device name, extended syntax
    device_value = pyp.Literal("minor").suppress() + number

    # a statement: keyword, optional value (longest-match via '^'),
    # optional "_is_default" marker, terminated by a semicolon
    stmt = (~rbrace + keyword + ~lbrace +
            pyp.Optional(ipv4_addr ^ ipv6_addr ^ value ^ quoted ^ meta_value ^
                         device_value) +
            pyp.Optional(defa) + semi +
            pyp.Optional(pyp.restOfLine).suppress())

    # an entire section: a name followed by braced statements
    section_name = pyp.Word(pyp.alphas + "_")
    section = section_name + lbrace + pyp.ZeroOrMore(pyp.Group(stmt)) + rbrace

    bnf = pyp.ZeroOrMore(pyp.Group(section ^ stmt))
    bnf.ignore(comment)

    # cache the grammar on the class so it is only built once
    cls._PARSE_SHOW = bnf

    return bnf
1513

    
1514
  @classmethod
  def _GetShowData(cls, minor):
    """Return the `drbdsetup show` data for a minor.

    @return: the raw stdout of the command, or None on failure (the
        error is logged, not raised)

    """
    result = utils.RunCmd(["drbdsetup", cls._DevPath(minor), "show"])
    if not result.failed:
      return result.stdout
    logging.error("Can't display the drbd config: %s - %s",
                  result.fail_reason, result.output)
    return None
1525

    
1526
  @classmethod
1527
  def _GetDevInfo(cls, out):
1528
    """Parse details about a given DRBD minor.
1529

1530
    This return, if available, the local backing device (as a path)
1531
    and the local and remote (ip, port) information from a string
1532
    containing the output of the `drbdsetup show` command as returned
1533
    by _GetShowData.
1534

1535
    """
1536
    data = {}
1537
    if not out:
1538
      return data
1539

    
1540
    bnf = cls._GetShowParser()
1541
    # run pyparse
1542

    
1543
    try:
1544
      results = bnf.parseString(out)
1545
    except pyp.ParseException, err:
1546
      _ThrowError("Can't parse drbdsetup show output: %s", str(err))
1547

    
1548
    # and massage the results into our desired format
1549
    for section in results:
1550
      sname = section[0]
1551
      if sname == "_this_host":
1552
        for lst in section[1:]:
1553
          if lst[0] == "disk":
1554
            data["local_dev"] = lst[1]
1555
          elif lst[0] == "meta-disk":
1556
            data["meta_dev"] = lst[1]
1557
            data["meta_index"] = lst[2]
1558
          elif lst[0] == "address":
1559
            data["local_addr"] = tuple(lst[1:])
1560
      elif sname == "_remote_host":
1561
        for lst in section[1:]:
1562
          if lst[0] == "address":
1563
            data["remote_addr"] = tuple(lst[1:])
1564
    return data
1565

    
1566
  def _MatchesLocal(self, info):
    """Test if our local config matches with an existing device.

    The parameter should be as returned from `_GetDevInfo()`. This
    method tests if our local backing device is the same as the one in
    the info parameter, in effect testing if we look like the given
    device.

    """
    if self._children:
      data_dev, meta_dev = self._children
    else:
      data_dev = meta_dev = None

    # the backing disk must either match exactly or be absent on both sides
    if data_dev is None:
      matches = "local_dev" not in info
    else:
      matches = ("local_dev" in info and
                 info["local_dev"] == data_dev.dev_path)

    # likewise for the meta device (always at index 0 when present)
    if meta_dev is None:
      matches = matches and ("meta_dev" not in info and
                             "meta_index" not in info)
    else:
      matches = matches and ("meta_dev" in info and
                             info["meta_dev"] == meta_dev.dev_path)
      matches = matches and ("meta_index" in info and
                             info["meta_index"] == 0)
    return matches
1594

    
1595
  def _MatchesNet(self, info):
    """Test if our network config matches with an existing device.

    The parameter should be as returned from `_GetDevInfo()`. This
    method tests if our network configuration is the same as the one
    in the info parameter, in effect testing if we look like the given
    device.

    """
    # both sides unconfigured on both ends: trivially matching
    if (self._lhost is None and "local_addr" not in info and
        self._rhost is None and "remote_addr" not in info):
      return True

    # we have no network config but the device does (or vice versa)
    if self._lhost is None:
      return False

    if "local_addr" not in info or "remote_addr" not in info:
      return False

    return (info["local_addr"] == (self._lhost, self._lport) and
            info["remote_addr"] == (self._rhost, self._rport))
1619

    
1620
  def _AssembleLocal(self, minor, backend, meta, size):
    """Configure the local part of a DRBD device.

    Attaches the backing and meta devices to the given minor via
    `drbdsetup ... disk`, with barrier and custom options taken from
    self.params.

    """
    cmd = ["drbdsetup", self._DevPath(minor), "disk",
           backend, meta, "0",
           "-e", "detach",
           "--create-device"]
    if size:
      cmd.extend(["-d", "%sm" % size])

    # barrier options depend on the running DRBD version
    version = self._GetVersion(self._GetProcData())
    cmd.extend(self._ComputeDiskBarrierArgs(
      version["k_major"], version["k_minor"], version["k_point"],
      self.params[constants.LDP_BARRIERS],
      self.params[constants.LDP_NO_META_FLUSH]))

    custom_options = self.params[constants.LDP_DISK_CUSTOM]
    if custom_options:
      cmd.extend(shlex.split(custom_options))

    result = utils.RunCmd(cmd)
    if result.failed:
      _ThrowError("drbd%d: can't attach local disk: %s", minor, result.output)
1648

    
1649
  @classmethod
  def _ComputeDiskBarrierArgs(cls, vmaj, vmin, vrel, disabled_barriers,
                              disable_meta_flush):
    """Compute the DRBD command line parameters for disk barriers

    Returns a list of the disk barrier parameters as requested via the
    disabled_barriers and disable_meta_flush arguments, and according to the
    supported ones in the DRBD version vmaj.vmin.vrel

    If the desired option is unsupported, raises errors.BlockDeviceError.

    """
    disabled_barriers_set = frozenset(disabled_barriers)
    if disabled_barriers_set not in constants.DRBD_VALID_BARRIER_OPT:
      raise errors.BlockDeviceError("%s is not a valid option set for DRBD"
                                    " barriers" % disabled_barriers)

    args = []

    # The following code assumes DRBD 8.x, with x < 4 and x != 1 (DRBD 8.1.x
    # does not exist). Note: the original condition read
    # "not vmaj == 8 and vmin in (0, 2, 3)", which due to operator
    # precedence never rejected any 8.x version (e.g. 8.4) and wrongly
    # rejected 9.0/9.2/9.3; fixed to match the documented intent.
    if vmaj != 8 or vmin not in (0, 2, 3):
      raise errors.BlockDeviceError("Unsupported DRBD version: %d.%d.%d" %
                                    (vmaj, vmin, vrel))

    def _AppendOrRaise(option, min_version):
      """Helper for DRBD options"""
      if min_version is not None and vrel >= min_version:
        args.append(option)
      else:
        raise errors.BlockDeviceError("Could not use the option %s as the"
                                      " DRBD version %d.%d.%d does not support"
                                      " it." % (option, vmaj, vmin, vrel))

    # the minimum version for each feature is encoded via pairs of (minor
    # version -> x) where x is version in which support for the option was
    # introduced.
    meta_flush_supported = disk_flush_supported = {
      0: 12,
      2: 7,
      3: 0,
      }

    disk_drain_supported = {
      2: 7,
      3: 0,
      }

    disk_barriers_supported = {
      3: 0,
      }

    # meta flushes
    if disable_meta_flush:
      _AppendOrRaise(cls._DISABLE_META_FLUSH_OPTION,
                     meta_flush_supported.get(vmin, None))

    # disk flushes
    if constants.DRBD_B_DISK_FLUSH in disabled_barriers_set:
      _AppendOrRaise(cls._DISABLE_FLUSH_OPTION,
                     disk_flush_supported.get(vmin, None))

    # disk drain
    if constants.DRBD_B_DISK_DRAIN in disabled_barriers_set:
      _AppendOrRaise(cls._DISABLE_DRAIN_OPTION,
                     disk_drain_supported.get(vmin, None))

    # disk barriers
    if constants.DRBD_B_DISK_BARRIERS in disabled_barriers_set:
      _AppendOrRaise(cls._DISABLE_DISK_OPTION,
                     disk_barriers_supported.get(vmin, None))

    return args
1722

    
1723
  def _AssembleNet(self, minor, net_info, protocol,
                   dual_pri=False, hmac=None, secret=None):
    """Configure the network part of the device.

    Sets the sync parameters, validates the endpoint addresses, runs
    'drbdsetup net' and waits until 'drbdsetup show' confirms the
    configured addresses.

    """
    lhost, lport, rhost, rport = net_info
    if None in net_info:
      # an incomplete endpoint specification means "no network": tear
      # down any existing network configuration instead
      self._ShutdownNet(minor)
      return

    # Workaround for a race condition. When DRBD is doing its dance to
    # establish a connection with its peer, it also sends the
    # synchronization speed over the wire. In some cases setting the
    # sync speed only after setting up both sides can race with DRBD
    # connecting, hence we set it here before telling DRBD anything
    # about its peer.
    sync_errors = self._SetMinorSyncParams(minor, self.params)
    if sync_errors:
      _ThrowError("drbd%d: can't set the synchronization parameters: %s" %
                  (minor, utils.CommaJoin(sync_errors)))

    # both endpoints must be valid addresses of the same family
    if netutils.IP6Address.IsValid(lhost):
      family = "ipv6"
      if not netutils.IP6Address.IsValid(rhost):
        _ThrowError("drbd%d: can't connect ip %s to ip %s" %
                    (minor, lhost, rhost))
    elif netutils.IP4Address.IsValid(lhost):
      family = "ipv4"
      if not netutils.IP4Address.IsValid(rhost):
        _ThrowError("drbd%d: can't connect ip %s to ip %s" %
                    (minor, lhost, rhost))
    else:
      _ThrowError("drbd%d: Invalid ip %s" % (minor, lhost))

    args = ["drbdsetup", self._DevPath(minor), "net",
            "%s:%s:%s" % (family, lhost, lport),
            "%s:%s:%s" % (family, rhost, rport), protocol,
            "-A", "discard-zero-changes",
            "-B", "consensus",
            "--create-device",
            ]
    if dual_pri:
      args.append("-m")
    if hmac and secret:
      args.extend(["-a", hmac, "-x", secret])

    net_custom = self.params[constants.LDP_NET_CUSTOM]
    if net_custom:
      args.extend(shlex.split(net_custom))

    result = utils.RunCmd(args)
    if result.failed:
      _ThrowError("drbd%d: can't setup network: %s - %s",
                  minor, result.fail_reason, result.output)

    def _CheckNetworkConfig():
      # retry until 'drbdsetup show' reports both addresses with the
      # values we just configured
      show_info = self._GetDevInfo(self._GetShowData(minor))
      for key, expected in (("local_addr", (lhost, lport)),
                            ("remote_addr", (rhost, rport))):
        if show_info.get(key) != expected:
          raise utils.RetryAgain()

    try:
      utils.Retry(_CheckNetworkConfig, 1.0, 10.0)
    except utils.RetryTimeout:
      _ThrowError("drbd%d: timeout while configuring network", minor)
  def AddChildren(self, devices):
    """Add a disk to the DRBD device.

    @param devices: a list of exactly two block devices: the data
        device and the meta device, in this order

    """
    if self.minor is None:
      # FIX: the original message misspelled "drbd8" as "dbrd8"; now
      # consistent with the RemoveChildren error message
      _ThrowError("drbd%d: can't attach to drbd8 during AddChildren",
                  self._aminor)
    if len(devices) != 2:
      _ThrowError("drbd%d: need two devices for AddChildren", self.minor)
    info = self._GetDevInfo(self._GetShowData(self.minor))
    if "local_dev" in info:
      _ThrowError("drbd%d: already attached to a local disk", self.minor)
    backend, meta = devices
    if backend.dev_path is None or meta.dev_path is None:
      _ThrowError("drbd%d: children not ready during AddChildren", self.minor)
    backend.Open()
    meta.Open()
    # the meta device must be validated and (re)initialized before the
    # new local storage can be attached
    self._CheckMetaSize(meta.dev_path)
    self._InitMeta(self._FindUnusedMinor(), meta.dev_path)

    self._AssembleLocal(self.minor, backend.dev_path, meta.dev_path, self.size)
    self._children = devices
  def RemoveChildren(self, devices):
    """Detach the drbd device from local storage.

    @param devices: the two child device paths that we expect to be
        currently attached

    """
    if self.minor is None:
      _ThrowError("drbd%d: can't attach to drbd8 during RemoveChildren",
                  self._aminor)
    # early return if we don't actually have backing storage
    show_info = self._GetDevInfo(self._GetShowData(self.minor))
    if "local_dev" not in show_info:
      return
    if len(self._children) != 2:
      _ThrowError("drbd%d: we don't have two children: %s", self.minor,
                  self._children)
    if self._children.count(None) == 2: # we don't actually have children :)
      logging.warning("drbd%d: requested detach while detached", self.minor)
      return
    if len(devices) != 2:
      _ThrowError("drbd%d: we need two children in RemoveChildren", self.minor)
    # the requested paths must match the currently attached children
    for my_child, their_path in zip(self._children, devices):
      if their_path != my_child.dev_path:
        _ThrowError("drbd%d: mismatch in local storage (%s != %s) in"
                    " RemoveChildren", self.minor, their_path,
                    my_child.dev_path)

    self._ShutdownLocal(self.minor)
    self._children = []
  @classmethod
  def _SetMinorSyncParams(cls, minor, params):
    """Set the parameters of the DRBD syncer.

    This is the low-level implementation.

    @type minor: int
    @param minor: the drbd minor whose settings we change
    @type params: dict
    @param params: LD level disk parameters related to the synchronization
    @rtype: list
    @return: a list of error messages

    """
    args = ["drbdsetup", cls._DevPath(minor), "syncer"]
    if params[constants.LDP_DYNAMIC_RESYNC]:
      version = cls._GetVersion(cls._GetProcData())
      vmin = version["k_minor"]
      vrel = version["k_point"]

      # By definition we are using 8.x, so just check the rest of the
      # version number; the dynamic controller needs at least 8.3.9
      if vmin != 3 or vrel < 9:
        msg = ("The current DRBD version (8.%d.%d) does not support the "
               "dynamic resync speed controller" % (vmin, vrel))
        logging.error(msg)
        return [msg]

      if params[constants.LDP_PLAN_AHEAD] == 0:
        msg = ("A value of 0 for c-plan-ahead disables the dynamic sync speed"
               " controller at DRBD level. If you want to disable it, please"
               " set the dynamic-resync disk parameter to False.")
        logging.error(msg)
        return [msg]

      # add the c-* parameters to args
      for option, param_key in (("--c-plan-ahead", constants.LDP_PLAN_AHEAD),
                                ("--c-fill-target",
                                 constants.LDP_FILL_TARGET),
                                ("--c-delay-target",
                                 constants.LDP_DELAY_TARGET),
                                ("--c-max-rate", constants.LDP_MAX_RATE),
                                ("--c-min-rate", constants.LDP_MIN_RATE)):
        args.extend([option, params[param_key]])
    else:
      args.extend(["-r", "%d" % params[constants.LDP_RESYNC_RATE]])

    args.append("--create-device")
    result = utils.RunCmd(args)
    if result.failed:
      msg = ("Can't change syncer rate: %s - %s" %
             (result.fail_reason, result.output))
      logging.error(msg)
      return [msg]

    return []
  def SetSyncParams(self, params):
    """Set the synchronization parameters of the DRBD syncer.

    @type params: dict
    @param params: LD level disk parameters related to the synchronization
    @rtype: list
    @return: a list of error messages, emitted both by the current node and by
    children. An empty list means no errors

    """
    if self.minor is None:
      err = "Not attached during SetSyncParams"
      logging.info(err)
      return [err]

    # collect errors from the children first, then from our own minor
    messages = super(DRBD8, self).SetSyncParams(params)
    messages.extend(self._SetMinorSyncParams(self.minor, params))
    return messages
  def PauseResumeSync(self, pause):
    """Pauses or resumes the sync of a DRBD device.

    @param pause: Whether to pause or resume
    @return: the success of the operation

    """
    if self.minor is None:
      logging.info("Not attached during PauseSync")
      return False

    children_ok = super(DRBD8, self).PauseResumeSync(pause)

    cmd = "pause-sync" if pause else "resume-sync"

    result = utils.RunCmd(["drbdsetup", self.dev_path, cmd])
    if result.failed:
      logging.error("Can't %s: %s - %s", cmd,
                    result.fail_reason, result.output)
    # succeed only if both our command and all children succeeded
    return not result.failed and children_ok
  def GetProcStatus(self):
    """Return device data from /proc.

    @rtype: DRBD8Status

    """
    if self.minor is None:
      _ThrowError("drbd%d: GetStats() called while not attached", self._aminor)
    minors_info = self._MassageProcData(self._GetProcData())
    if self.minor not in minors_info:
      _ThrowError("drbd%d: can't find myself in /proc", self.minor)
    return DRBD8Status(minors_info[self.minor])
  def GetSyncStatus(self):
    """Returns the sync status of the device.


    If sync_percent is None, it means all is ok
    If estimated_time is None, it means we can't estimate
    the time needed, otherwise it's the time left in seconds.


    We set the is_degraded parameter to True on two conditions:
    network not connected or local disk missing.

    We compute the ldisk parameter based on whether we have a local
    disk or not.

    @rtype: objects.BlockDevStatus

    """
    if self.minor is None and not self.Attach():
      _ThrowError("drbd%d: can't Attach() in GetSyncStatus", self._aminor)

    stats = self.GetProcStatus()
    # degraded unless both connected and local disk up-to-date
    is_degraded = not (stats.is_connected and stats.is_disk_uptodate)

    if stats.is_disk_uptodate:
      ldisk_status = constants.LDS_OKAY
    elif stats.is_diskless:
      ldisk_status = constants.LDS_FAULTY
    else:
      ldisk_status = constants.LDS_UNKNOWN

    return objects.BlockDevStatus(dev_path=self.dev_path,
                                  major=self.major,
                                  minor=self.minor,
                                  sync_percent=stats.sync_percent,
                                  estimated_time=stats.est_time,
                                  is_degraded=is_degraded,
                                  ldisk_status=ldisk_status)
  def Open(self, force=False):
    """Make the local state primary.

    If the 'force' parameter is given, the '-o' option is passed to
    drbdsetup. Since this is a potentially dangerous operation, the
    force flag should be only given after creation, when it actually
    is mandatory.

    """
    if self.minor is None and not self.Attach():
      logging.error("DRBD cannot attach to a device during open")
      return False
    args = ["drbdsetup", self.dev_path, "primary"]
    if force:
      # '-o' overrides the outdated-data check; dangerous except right
      # after creation
      args += ["-o"]
    result = utils.RunCmd(args)
    if result.failed:
      _ThrowError("drbd%d: can't make drbd device primary: %s", self.minor,
                  result.output)
  def Close(self):
    """Make the local state secondary.

    This will, of course, fail if the device is in use.

    """
    if self.minor is None and not self.Attach():
      _ThrowError("drbd%d: can't Attach() in Close()", self._aminor)
    demote = utils.RunCmd(["drbdsetup", self.dev_path, "secondary"])
    if demote.failed:
      _ThrowError("drbd%d: can't switch drbd device to secondary: %s",
                  self.minor, demote.output)
  def DisconnectNet(self):
    """Removes network configuration.

    This method shutdowns the network side of the device.

    The method will wait up to a hardcoded timeout for the device to
    go into standalone after the 'disconnect' command before
    re-configuring it, as sometimes it takes a while for the
    disconnect to actually propagate and thus we might issue a 'net'
    command while the device is still connected. If the device will
    still be attached to the network and we time out, we raise an
    exception.

    """
    if self.minor is None:
      _ThrowError("drbd%d: disk not attached in re-attach net", self._aminor)

    if None in (self._lhost, self._lport, self._rhost, self._rport):
      _ThrowError("drbd%d: DRBD disk missing network info in"
                  " DisconnectNet()", self.minor)

    # one-element list used as a mutable cell so the retry closure can
    # record whether at least one disconnect attempt succeeded
    ever_disconnected = [_IgnoreError(self._ShutdownNet, self.minor)]

    def _WaitForDisconnect():
      if self.GetProcStatus().is_standalone:
        return

      # retry the disconnect: due to a well-timed disconnect on the
      # peer, our disconnect command might be ignored and forgotten
      if _IgnoreError(self._ShutdownNet, self.minor):
        ever_disconnected[0] = True

      raise utils.RetryAgain()

    # Keep start time
    start_time = time.time()

    try:
      # Start delay at 100 milliseconds and grow up to 2 seconds
      utils.Retry(_WaitForDisconnect, (0.1, 1.5, 2.0),
                  self._NET_RECONFIG_TIMEOUT)
    except utils.RetryTimeout:
      if ever_disconnected[0]:
        msg = ("drbd%d: device did not react to the"
               " 'disconnect' command in a timely manner")
      else:
        msg = "drbd%d: can't shutdown network, even after multiple retries"

      _ThrowError(msg, self.minor)

    # log if tearing down the network took a noticeable fraction of the
    # allowed timeout
    reconfig_time = time.time() - start_time
    if reconfig_time > (self._NET_RECONFIG_TIMEOUT * 0.25):
      logging.info("drbd%d: DisconnectNet: detach took %.3f seconds",
                   self.minor, reconfig_time)
  def AttachNet(self, multimaster):
    """Reconnects the network.

    This method connects the network side of the device with a
    specified multi-master flag. The device needs to be 'Standalone'
    but have valid network configuration data.

    @param multimaster: init the network in dual-primary mode

    """
    if self.minor is None:
      _ThrowError("drbd%d: device not attached in AttachNet", self._aminor)

    if None in (self._lhost, self._lport, self._rhost, self._rport):
      _ThrowError("drbd%d: missing network info in AttachNet()", self.minor)

    # the device must currently be disconnected from any peer
    proc_status = self.GetProcStatus()
    if not proc_status.is_standalone:
      _ThrowError("drbd%d: device is not standalone in AttachNet", self.minor)

    self._AssembleNet(self.minor,
                      (self._lhost, self._lport, self._rhost, self._rport),
                      constants.DRBD_NET_PROTOCOL, dual_pri=multimaster,
                      hmac=constants.DRBD_HMAC_ALG, secret=self._secret)
  def Attach(self):
    """Check if our minor is configured.

    This doesn't do any device configurations - it only checks if the
    minor is in a state different from Unconfigured.

    Note that this function will not change the state of the system in
    any way (except in case of side-effects caused by reading from
    /proc).

    @rtype: boolean
    @return: True when the minor is configured

    """
    if self._aminor in self.GetUsedDevs():
      minor = self._aminor
    else:
      minor = None

    self._SetFromMinor(minor)
    return minor is not None
  def Assemble(self):
    """Assemble the drbd.

    Method:
      - if we have a configured device, we try to ensure that it matches
        our config
      - if not, we create it from zero
      - anyway, set the device parameters

    """
    super(DRBD8, self).Assemble()

    self.Attach()
    if self.minor is not None:
      # partially configured: recheck local and network status and try
      # to fix the device
      self._SlowAssemble()
    else:
      # nothing configured for our minor yet: assemble from scratch
      self._FastAssemble()

    sync_errors = self.SetSyncParams(self.params)
    if sync_errors:
      _ThrowError("drbd%d: can't set the synchronization parameters: %s" %
                  (self.minor, utils.CommaJoin(sync_errors)))
  def _SlowAssemble(self):
    """Assembles the DRBD device from a (partially) configured device.

    In case of partially attached (local device matches but no network
    setup), we perform the network attach. If successful, we re-test
    the attach if can return success.

    """
    # TODO: Rewrite to not use a for loop just because there is 'break'
    # pylint: disable=W0631
    net_data = (self._lhost, self._lport, self._rhost, self._rport)
    # NOTE: single-iteration loop; 'break' means "minor successfully
    # configured", while falling through to the for-else clause below
    # resets minor to None (no repair case applied)
    for minor in (self._aminor,):
      info = self._GetDevInfo(self._GetShowData(minor))
      match_l = self._MatchesLocal(info)
      match_r = self._MatchesNet(info)

      if match_l and match_r:
        # everything matches
        break

      if match_l and not match_r and "local_addr" not in info:
        # disk matches, but not attached to network, attach and recheck
        self._AssembleNet(minor, net_data, constants.DRBD_NET_PROTOCOL,
                          hmac=constants.DRBD_HMAC_ALG, secret=self._secret)
        if self._MatchesNet(self._GetDevInfo(self._GetShowData(minor))):
          break
        else:
          _ThrowError("drbd%d: network attach successful, but 'drbdsetup"
                      " show' disagrees", minor)

      if match_r and "local_dev" not in info:
        # no local disk, but network attached and it matches
        self._AssembleLocal(minor, self._children[0].dev_path,
                            self._children[1].dev_path, self.size)
        if self._MatchesNet(self._GetDevInfo(self._GetShowData(minor))):
          break
        else:
          _ThrowError("drbd%d: disk attach successful, but 'drbdsetup"
                      " show' disagrees", minor)

      # this case must be considered only if we actually have local
      # storage, i.e. not in diskless mode, because all diskless
      # devices are equal from the point of view of local
      # configuration
      if (match_l and "local_dev" in info and
          not match_r and "local_addr" in info):
        # strange case - the device network part points to somewhere
        # else, even though its local storage is ours; as we own the
        # drbd space, we try to disconnect from the remote peer and
        # reconnect to our correct one
        try:
          self._ShutdownNet(minor)
        except errors.BlockDeviceError, err:
          _ThrowError("drbd%d: device has correct local storage, wrong"
                      " remote peer and is unable to disconnect in order"
                      " to attach to the correct peer: %s", minor, str(err))
        # note: _AssembleNet also handles the case when we don't want
        # local storage (i.e. one or more of the _[lr](host|port) is
        # None)
        self._AssembleNet(minor, net_data, constants.DRBD_NET_PROTOCOL,
                          hmac=constants.DRBD_HMAC_ALG, secret=self._secret)
        if self._MatchesNet(self._GetDevInfo(self._GetShowData(minor))):
          break
        else:
          _ThrowError("drbd%d: network attach successful, but 'drbdsetup"
                      " show' disagrees", minor)

    # for-else: executed only when no 'break' above fired, i.e. no
    # repair strategy could bring the device to the desired state
    else:
      minor = None

    self._SetFromMinor(minor)
    if minor is None:
      _ThrowError("drbd%d: cannot activate, unknown or unhandled reason",
                  self._aminor)
  def _FastAssemble(self):
    """Assemble the drbd device from zero.

    This is run when in Assemble we detect our minor is unused.

    """
    minor = self._aminor
    # attach local storage only when both children are present
    if self._children and self._children[0] and self._children[1]:
      self._AssembleLocal(minor, self._children[0].dev_path,
                          self._children[1].dev_path, self.size)
    # configure the network side only with complete endpoint data
    if all((self._lhost, self._lport, self._rhost, self._rport)):
      self._AssembleNet(minor,
                        (self._lhost, self._lport, self._rhost, self._rport),
                        constants.DRBD_NET_PROTOCOL,
                        hmac=constants.DRBD_HMAC_ALG, secret=self._secret)
    self._SetFromMinor(minor)
  @classmethod
  def _ShutdownLocal(cls, minor):
    """Detach from the local device.

    I/Os will continue to be served from the remote device. If we
    don't have a remote device, this operation will fail.

    """
    detach = utils.RunCmd(["drbdsetup", cls._DevPath(minor), "detach"])
    if detach.failed:
      _ThrowError("drbd%d: can't detach local disk: %s", minor,
                  detach.output)
  @classmethod
  def _ShutdownNet(cls, minor):
    """Disconnect from the remote peer.

    This fails if we don't have a local device.

    """
    disconnect = utils.RunCmd(["drbdsetup", cls._DevPath(minor), "disconnect"])
    if disconnect.failed:
      _ThrowError("drbd%d: can't shutdown network: %s", minor,
                  disconnect.output)
  @classmethod
  def _ShutdownAll(cls, minor):
    """Deactivate the device.

    This will, of course, fail if the device is in use.

    """
    down = utils.RunCmd(["drbdsetup", cls._DevPath(minor), "down"])
    if down.failed:
      _ThrowError("drbd%d: can't shutdown drbd device: %s",
                  minor, down.output)
  def Shutdown(self):
    """Shutdown the DRBD device.

    """
    if self.minor is None and not self.Attach():
      logging.info("drbd%d: not attached during Shutdown()", self._aminor)
      return
    # forget our minor/path first, then actually deactivate the device
    minor = self.minor
    self.minor = self.dev_path = None
    self._ShutdownAll(minor)
  def Remove(self):
    """Stub remove for DRBD devices.

    DRBD devices are only assembled, never created per se, so removal
    amounts to shutting the device down.

    """
    self.Shutdown()
  @classmethod
  def Create(cls, unique_id, children, size, params, excl_stor, *args):
    """Create a new DRBD8 device.

    Since DRBD devices are not created per se, just assembled, this
    function only initializes the metadata.

    """
    if len(children) != 2:
      raise errors.ProgrammerError("Invalid setup for the drbd device")
    if excl_stor:
      raise errors.ProgrammerError("DRBD device requested with"
                                   " exclusive_storage")
    # check that the minor is unused
    aminor = unique_id[4]
    proc_info = cls._MassageProcData(cls._GetProcData())
    in_use = (aminor in proc_info and
              DRBD8Status(proc_info[aminor]).is_in_use)
    if in_use:
      _ThrowError("drbd%d: minor is already in use at Create() time", aminor)
    # bring up the meta device and write a fresh metadata block to it
    meta = children[1]
    meta.Assemble()
    if not meta.Attach():
      _ThrowError("drbd%d: can't attach to meta device '%s'",
                  aminor, meta)
    cls._CheckMetaSize(meta.dev_path)
    cls._InitMeta(aminor, meta.dev_path)
    return cls(unique_id, children, size, params)
  def Grow(self, amount, dryrun, backingstore):
    """Resize the DRBD device and its backing storage.

    @param amount: the amount (in mebibytes) to grow with

    """
    if self.minor is None:
      _ThrowError("drbd%d: Grow called while not attached", self._aminor)
    if len(self._children) != 2 or None in self._children:
      _ThrowError("drbd%d: cannot grow diskless device", self.minor)
    # grow the data child first; the meta child keeps its size
    self._children[0].Grow(amount, dryrun, backingstore)
    if dryrun or backingstore:
      # DRBD does not support dry-run mode and is not backing storage,
      # so we'll return here
      return
    new_size_spec = "%dm" % (self.size + amount)
    resize = utils.RunCmd(["drbdsetup", self.dev_path, "resize", "-s",
                           new_size_spec])
    if resize.failed:
      _ThrowError("drbd%d: resize failed: %s", self.minor, resize.output)
class FileStorage(BlockDev):
2356
  """File device.
2357

2358
  This class represents a file storage backend device.
2359

2360
  The unique_id for the file device is a (file_driver, file_path) tuple.
2361

2362
  """
2363
  def __init__(self, unique_id, children, size, params, *args):
2364
    """Initalizes a file device backend.
2365

2366
    """
2367
    if children:
2368
      raise errors.BlockDeviceError("Invalid setup for file device")
2369
    super(FileStorage, self).__init__(unique_id, children, size, params)
2370
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
2371
      raise ValueError("Invalid configuration data %s" % str(unique_id))
2372
    self.driver = unique_id[0]
2373
    self.dev_path = unique_id[1]
2374

    
2375
    CheckFileStoragePath(self.dev_path)
2376

    
2377
    self.Attach()
2378

    
2379
  def Assemble(self):
2380
    """Assemble the device.
2381

2382
    Checks whether the file device exists, raises BlockDeviceError otherwise.
2383

2384
    """
2385
    if not os.path.exists(self.dev_path):
2386
      _ThrowError("File device '%s' does not exist" % self.dev_path)
2387

    
2388
  def Shutdown(self):
2389
    """Shutdown the device.
2390

2391
    This is a no-op for the file type, as we don't deactivate
2392
    the file on shutdown.
2393

2394
    """
2395
    pass
2396

    
2397
  def Open(self, force=False):
2398
    """Make the device ready for I/O.
2399

2400
    This is a no-op for the file type.
2401

2402
    """
2403
    pass
2404

    
2405
  def Close(self):
2406
    """Notifies that the device will no longer be used for I/O.
2407

2408
    This is a no-op for the file type.
2409

2410
    """
2411
    pass
2412

    
2413
  def Remove(self):
2414
    """Remove the file backing the block device.
2415

2416
    @rtype: boolean
2417
    @return: True if the removal was successful
2418

2419
    """
2420
    try:
2421
      os.remove(self.dev_path)
2422
    except OSError, err:
2423
      if err.errno != errno.ENOENT:
2424
        _ThrowError("Can't remove file '%s': %s", self.dev_path, err)
2425

    
2426
  def Rename(self, new_id):
2427
    """Renames the file.
2428

2429
    """
2430
    # TODO: implement rename for file-based storage
2431
    _ThrowError("Rename is not supported for file-based storage")
2432

    
2433
  def Grow(self, amount, dryrun, backingstore):
2434
    """Grow the file
2435

2436
    @param amount: the amount (in mebibytes) to grow with
2437

2438
    """
2439
    if not backingstore:
2440
      return
2441
    # Check that the file exists
2442
    self.Assemble()
2443
    current_size = self.GetActualSize()
2444
    new_size = current_size + amount * 1024 * 1024
2445
    assert new_size > current_size, "Cannot Grow with a negative amount"
2446
    # We can't really simulate the growth
2447
    if dryrun:
2448
      return
2449
    try:
2450
      f = open(self.dev_path, "a+")
2451
      f.truncate(new_size)
2452
      f.close()
2453
    except EnvironmentError, err:
2454
      _ThrowError("Error in file growth: %", str(err))
2455

    
2456
  def Attach(self):
2457
    """Attach to an existing file.
2458

2459
    Check if this file already exists.
2460

2461
    @rtype: boolean
2462
    @return: True if file exists
2463

2464
    """
2465
    self.attached = os.path.exists(self.dev_path)
2466
    return self.attached
2467

    
2468
  def GetActualSize(self):
2469
    """Return the actual disk size.
2470

2471
    @note: the device needs to be active when this is called
2472

2473
    """
2474
    assert self.attached, "BlockDevice not attached in GetActualSize()"
2475
    try:
2476
      st = os.stat(self.dev_path)
2477
      return st.st_size
2478
    except OSError, err:
2479
      _ThrowError("Can't stat %s: %s", self.dev_path, err)
2480

    
2481
  @classmethod
2482
  def Create(cls, unique_id, children, size, params, excl_stor, *args):
2483
    """Create a new file.
2484

2485
    @param size: the size of file in MiB
2486

2487
    @rtype: L{bdev.FileStorage}
2488
    @return: an instance of FileStorage
2489

2490
    """
2491
    if excl_stor:
2492
      raise errors.ProgrammerError("FileStorage device requested with"
2493
                                   " exclusive_storage")
2494
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
2495
      raise ValueError("Invalid configuration data %s" % str(unique_id))
2496

    
2497
    dev_path = unique_id[1]
2498

    
2499
    CheckFileStoragePath(dev_path)
2500

    
2501
    try:
2502
      fd = os.open(dev_path, os.O_RDWR | os.O_CREAT | os.O_EXCL)
2503
      f = os.fdopen(fd, "w")
2504
      f.truncate(size * 1024 * 1024)
2505
      f.close()
2506
    except EnvironmentError, err:
2507
      if err.errno == errno.EEXIST:
2508
        _ThrowError("File already existing: %s", dev_path)
2509
      _ThrowError("Error in file creation: %", str(err))
2510

    
2511
    return FileStorage(unique_id, children, size, params)
2512

    
2513

    
2514
class PersistentBlockDevice(BlockDev):
2515
  """A block device with persistent node
2516

2517
  May be either directly attached, or exposed through DM (e.g. dm-multipath).
2518
  udev helpers are probably required to give persistent, human-friendly
2519
  names.
2520

2521
  For the time being, pathnames are required to lie under /dev.
2522

2523
  """
2524
  def __init__(self, unique_id, children, size, params, *args):
2525
    """Attaches to a static block device.
2526

2527
    The unique_id is a path under /dev.
2528

2529
    """
2530
    super(PersistentBlockDevice, self).__init__(unique_id, children, size,
2531
                                                params)
2532
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
2533
      raise ValueError("Invalid configuration data %s" % str(unique_id))
2534
    self.dev_path = unique_id[1]
2535
    if not os.path.realpath(self.dev_path).startswith("/dev/"):
2536
      raise ValueError("Full path '%s' lies outside /dev" %
2537
                              os.path.realpath(self.dev_path))
2538
    # TODO: this is just a safety guard checking that we only deal with devices
2539
    # we know how to handle. In the future this will be integrated with
2540
    # external storage backends and possible values will probably be collected
2541
    # from the cluster configuration.
2542
    if unique_id[0] != constants.BLOCKDEV_DRIVER_MANUAL:
2543
      raise ValueError("Got persistent block device of invalid type: %s" %
2544
                       unique_id[0])
2545

    
2546
    self.major = self.minor = None
2547
    self.Attach()
2548

    
2549
  @classmethod
2550
  def Create(cls, unique_id, children, size, params, excl_stor, *args):
2551
    """Create a new device
2552

2553
    This is a noop, we only return a PersistentBlockDevice instance
2554

2555
    """
2556
    if excl_stor:
2557
      raise errors.ProgrammerError("Persistent block device requested with"
2558
                                   " exclusive_storage")
2559
    return PersistentBlockDevice(unique_id, children, 0, params)
2560

    
2561
  def Remove(self):
2562
    """Remove a device
2563

2564
    This is a noop
2565

2566
    """
2567
    pass
2568

    
2569
  def Rename(self, new_id):
2570
    """Rename this device.
2571

2572
    """
2573
    _ThrowError("Rename is not supported for PersistentBlockDev storage")
2574

    
2575
  def Attach(self):
2576
    """Attach to an existing block device.
2577

2578

2579
    """
2580
    self.attached = False
2581
    try:
2582
      st = os.stat(self.dev_path)
2583
    except OSError, err:
2584
      logging.error("Error stat()'ing %s: %s", self.dev_path, str(err))
2585
      return False
2586

    
2587
    if not stat.S_ISBLK(st.st_mode):
2588
      logging.error("%s is not a block device", self.dev_path)
2589
      return False
2590

    
2591
    self.major = os.major(st.st_rdev)
2592
    self.minor = os.minor(st.st_rdev)
2593
    self.attached = True
2594

    
2595
    return True
2596

    
2597
  def Assemble(self):
2598
    """Assemble the device.
2599

2600
    """
2601
    pass
2602

    
2603
  def Shutdown(self):
2604
    """Shutdown the device.
2605

2606
    """
2607
    pass
2608

    
2609
  def Open(self, force=False):
2610
    """Make the device ready for I/O.
2611

2612
    """
2613
    pass
2614

    
2615
  def Close(self):
2616
    """Notifies that the device will no longer be used for I/O.
2617

2618
    """
2619
    pass
2620

    
2621
  def Grow(self, amount, dryrun, backingstore):
2622
    """Grow the logical volume.
2623

2624
    """
2625
    _ThrowError("Grow is not supported for PersistentBlockDev storage")
2626

    
2627

    
2628
class RADOSBlockDevice(BlockDev):
2629
  """A RADOS Block Device (rbd).
2630

2631
  This class implements the RADOS Block Device for the backend. You need
2632
  the rbd kernel driver, the RADOS Tools and a working RADOS cluster for
2633
  this to be functional.
2634

2635
  """
2636
  def __init__(self, unique_id, children, size, params):
2637
    """Attaches to an rbd device.
2638

2639
    """
2640
    super(RADOSBlockDevice, self).__init__(unique_id, children, size, params)
2641
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
2642
      raise ValueError("Invalid configuration data %s" % str(unique_id))
2643

    
2644
    self.driver, self.rbd_name = unique_id
2645

    
2646
    self.major = self.minor = None
2647
    self.Attach()
2648

    
2649
  @classmethod
2650
  def Create(cls, unique_id, children, size, params, excl_stor, *args):
2651
    """Create a new rbd device.
2652

2653
    Provision a new rbd volume inside a RADOS pool.
2654

2655
    """
2656
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
2657
      raise errors.ProgrammerError("Invalid configuration data %s" %
2658
                                   str(unique_id))
2659
    if excl_stor:
2660
      raise errors.ProgrammerError("RBD device requested with"
2661
                                   " exclusive_storage")
2662
    rbd_pool = params[constants.LDP_POOL]
2663
    rbd_name = unique_id[1]
2664

    
2665
    # Provision a new rbd volume (Image) inside the RADOS cluster.
2666
    cmd = [constants.RBD_CMD, "create", "-p", rbd_pool,
2667
           rbd_name, "--size", "%s" % size]
2668
    result = utils.RunCmd(cmd)
2669
    if result.failed:
2670
      _ThrowError("rbd creation failed (%s): %s",
2671
                  result.fail_reason, result.output)
2672

    
2673
    return RADOSBlockDevice(unique_id, children, size, params)
2674

    
2675
  def Remove(self):
2676
    """Remove the rbd device.
2677

2678
    """
2679
    rbd_pool = self.params[constants.LDP_POOL]
2680
    rbd_name = self.unique_id[1]
2681

    
2682
    if not self.minor and not self.Attach():
2683
      # The rbd device doesn't exist.
2684
      return
2685

    
2686
    # First shutdown the device (remove mappings).
2687
    self.Shutdown()
2688

    
2689
    # Remove the actual Volume (Image) from the RADOS cluster.
2690
    cmd = [constants.RBD_CMD, "rm", "-p", rbd_pool, rbd_name]
2691
    result = utils.RunCmd(cmd)
2692
    if result.failed:
2693
      _ThrowError("Can't remove Volume from cluster with rbd rm: %s - %s",
2694
                  result.fail_reason, result.output)
2695

    
2696
  def Rename(self, new_id):
2697
    """Rename this device.
2698

2699
    """
2700
    pass
2701

    
2702
  def Attach(self):
2703
    """Attach to an existing rbd device.
2704

2705
    This method maps the rbd volume that matches our name with
2706
    an rbd device and then attaches to this device.
2707

2708
    """
2709
    self.attached = False
2710

    
2711
    # Map the rbd volume to a block device under /dev
2712
    self.dev_path = self._MapVolumeToBlockdev(self.unique_id)
2713

    
2714
    try:
2715
      st = os.stat(self.dev_path)
2716
    except OSError, err:
2717
      logging.error("Error stat()'ing %s: %s", self.dev_path, str(err))
2718
      return False
2719

    
2720
    if not stat.S_ISBLK(st.st_mode):
2721
      logging.error("%s is not a block device", self.dev_path)
2722
      return False
2723

    
2724
    self.major = os.major(st.st_rdev)
2725
    self.minor = os.minor(st.st_rdev)
2726
    self.attached = True
2727

    
2728
    return True
2729

    
2730
  def _MapVolumeToBlockdev(self, unique_id):
2731
    """Maps existing rbd volumes to block devices.
2732

2733
    This method should be idempotent if the mapping already exists.
2734

2735
    @rtype: string
2736
    @return: the block device path that corresponds to the volume
2737

2738
    """
2739
    pool = self.params[constants.LDP_POOL]
2740
    name = unique_id[1]
2741

    
2742
    # Check if the mapping already exists.
2743
    rbd_dev = self._VolumeToBlockdev(pool, name)
2744
    if rbd_dev:
2745
      # The mapping exists. Return it.
2746
      return rbd_dev
2747

    
2748
    # The mapping doesn't exist. Create it.
2749
    map_cmd = [constants.RBD_CMD, "map", "-p", pool, name]
2750
    result = utils.RunCmd(map_cmd)
2751
    if result.failed:
2752
      _ThrowError("rbd map failed (%s): %s",
2753
                  result.fail_reason, result.output)
2754

    
2755
    # Find the corresponding rbd device.
2756
    rbd_dev = self._VolumeToBlockdev(pool, name)
2757
    if not rbd_dev:
2758
      _ThrowError("rbd map succeeded, but could not find the rbd block"
2759
                  " device in output of showmapped, for volume: %s", name)
2760

    
2761
    # The device was successfully mapped. Return it.
2762
    return rbd_dev
2763

    
2764
  @classmethod
2765
  def _VolumeToBlockdev(cls, pool, volume_name):
2766
    """Do the 'volume name'-to-'rbd block device' resolving.
2767

2768
    @type pool: string
2769
    @param pool: RADOS pool to use
2770
    @type volume_name: string
2771
    @param volume_name: the name of the volume whose device we search for
2772
    @rtype: string or None
2773
    @return: block device path if the volume is mapped, else None
2774

2775
    """
2776
    try:
2777
      # Newer versions of the rbd tool support json output formatting. Use it
2778
      # if available.
2779
      showmap_cmd = [
2780
        constants.RBD_CMD,
2781
        "showmapped",
2782
        "-p",
2783
        pool,
2784
        "--format",
2785
        "json"
2786
        ]
2787
      result = utils.RunCmd(showmap_cmd)
2788
      if result.failed:
2789
        logging.error("rbd JSON output formatting returned error (%s): %s,"
2790
                      "falling back to plain output parsing",
2791
                      result.fail_reason, result.output)
2792
        raise RbdShowmappedJsonError
2793

    
2794
      return cls._ParseRbdShowmappedJson(result.output, volume_name)
2795
    except RbdShowmappedJsonError:
2796
      # For older versions of rbd, we have to parse the plain / text output
2797
      # manually.
2798
      showmap_cmd = [constants.RBD_CMD, "showmapped", "-p", pool]
2799
      result = utils.RunCmd(showmap_cmd)
2800
      if result.failed:
2801
        _ThrowError("rbd showmapped failed (%s): %s",
2802
                    result.fail_reason, result.output)
2803

    
2804
      return cls._ParseRbdShowmappedPlain(result.output, volume_name)
2805

    
2806
  @staticmethod
2807
  def _ParseRbdShowmappedJson(output, volume_name):
2808
    """Parse the json output of `rbd showmapped'.
2809

2810
    This method parses the json output of `rbd showmapped' and returns the rbd
2811
    block device path (e.g. /dev/rbd0) that matches the given rbd volume.
2812

2813
    @type output: string
2814
    @param output: the json output of `rbd showmapped'
2815
    @type volume_name: string
2816
    @param volume_name: the name of the volume whose device we search for
2817
    @rtype: string or None
2818
    @return: block device path if the volume is mapped, else None
2819

2820
    """
2821
    try:
2822
      devices = serializer.LoadJson(output)
2823
    except ValueError, err:
2824
      _ThrowError("Unable to parse JSON data: %s" % err)
2825

    
2826
    rbd_dev = None
2827
    for d in devices.values(): # pylint: disable=E1103
2828
      try:
2829
        name = d["name"]
2830
      except KeyError:
2831
        _ThrowError("'name' key missing from json object %s", devices)
2832

    
2833
      if name == volume_name:
2834
        if rbd_dev is not None:
2835
          _ThrowError("rbd volume %s is mapped more than once", volume_name)
2836

    
2837
        rbd_dev = d["device"]
2838

    
2839
    return rbd_dev
2840

    
2841
  @staticmethod
2842
  def _ParseRbdShowmappedPlain(output, volume_name):
2843
    """Parse the (plain / text) output of `rbd showmapped'.
2844

2845
    This method parses the output of `rbd showmapped' and returns
2846
    the rbd block device path (e.g. /dev/rbd0) that matches the
2847
    given rbd volume.
2848

2849
    @type output: string
2850
    @param output: the plain text output of `rbd showmapped'
2851
    @type volume_name: string
2852
    @param volume_name: the name of the volume whose device we search for
2853
    @rtype: string or None
2854
    @return: block device path if the volume is mapped, else None
2855

2856
    """
2857
    allfields = 5
2858
    volumefield = 2
2859
    devicefield = 4
2860

    
2861
    lines = output.splitlines()
2862

    
2863
    # Try parsing the new output format (ceph >= 0.55).
2864
    splitted_lines = map(lambda l: l.split(), lines)
2865

    
2866
    # Check for empty output.
2867
    if not splitted_lines:
2868
      return None
2869

    
2870
    # Check showmapped output, to determine number of fields.
2871
    field_cnt = len(splitted_lines[0])
2872
    if field_cnt != allfields:
2873
      # Parsing the new format failed. Fallback to parsing the old output
2874
      # format (< 0.55).
2875
      splitted_lines = map(lambda l: l.split("\t"), lines)
2876
      if field_cnt != allfields:
2877
        _ThrowError("Cannot parse rbd showmapped output expected %s fields,"
2878
                    " found %s", allfields, field_cnt)
2879

    
2880
    matched_lines = \
2881
      filter(lambda l: len(l) == allfields and l[volumefield] == volume_name,
2882
             splitted_lines)
2883

    
2884
    if len(matched_lines) > 1:
2885
      _ThrowError("rbd volume %s mapped more than once", volume_name)
2886

    
2887
    if matched_lines:
2888
      # rbd block device found. Return it.
2889
      rbd_dev = matched_lines[0][devicefield]
2890
      return rbd_dev
2891

    
2892
    # The given volume is not mapped.
2893
    return None
2894

    
2895
  def Assemble(self):
2896
    """Assemble the device.
2897

2898
    """
2899
    pass
2900

    
2901
  def Shutdown(self):
2902
    """Shutdown the device.
2903

2904
    """
2905
    if not self.minor and not self.Attach():
2906
      # The rbd device doesn't exist.
2907
      return
2908

    
2909
    # Unmap the block device from the Volume.
2910
    self._UnmapVolumeFromBlockdev(self.unique_id)
2911

    
2912
    self.minor = None
2913
    self.dev_path = None
2914

    
2915
  def _UnmapVolumeFromBlockdev(self, unique_id):
2916
    """Unmaps the rbd device from the Volume it is mapped.
2917

2918
    Unmaps the rbd device from the Volume it was previously mapped to.
2919
    This method should be idempotent if the Volume isn't mapped.
2920

2921
    """
2922
    pool = self.params[constants.LDP_POOL]
2923
    name = unique_id[1]
2924

    
2925
    # Check if the mapping already exists.
2926
    rbd_dev = self._VolumeToBlockdev(pool, name)
2927

    
2928
    if rbd_dev:
2929
      # The mapping exists. Unmap the rbd device.
2930
      unmap_cmd = [constants.RBD_CMD, "unmap", "%s" % rbd_dev]
2931
      result = utils.RunCmd(unmap_cmd)
2932
      if result.failed:
2933
        _ThrowError("rbd unmap failed (%s): %s",
2934
                    result.fail_reason, result.output)
2935

    
2936
  def Open(self, force=False):
2937
    """Make the device ready for I/O.
2938

2939
    """
2940
    pass
2941

    
2942
  def Close(self):
2943
    """Notifies that the device will no longer be used for I/O.
2944

2945
    """
2946
    pass
2947

    
2948
  def Grow(self, amount, dryrun, backingstore):
2949
    """Grow the Volume.
2950

2951
    @type amount: integer
2952
    @param amount: the amount (in mebibytes) to grow with
2953
    @type dryrun: boolean
2954
    @param dryrun: whether to execute the operation in simulation mode
2955
        only, without actually increasing the size
2956

2957
    """
2958
    if not backingstore:
2959
      return
2960
    if not self.Attach():
2961
      _ThrowError("Can't attach to rbd device during Grow()")
2962

    
2963
    if dryrun:
2964
      # the rbd tool does not support dry runs of resize operations.
2965
      # Since rbd volumes are thinly provisioned, we assume
2966
      # there is always enough free space for the operation.
2967
      return
2968

    
2969
    rbd_pool = self.params[constants.LDP_POOL]
2970
    rbd_name = self.unique_id[1]
2971
    new_size = self.size + amount
2972

    
2973
    # Resize the rbd volume (Image) inside the RADOS cluster.
2974
    cmd = [constants.RBD_CMD, "resize", "-p", rbd_pool,
2975
           rbd_name, "--size", "%s" % new_size]
2976
    result = utils.RunCmd(cmd)
2977
    if result.failed:
2978
      _ThrowError("rbd resize failed (%s): %s",
2979
                  result.fail_reason, result.output)
2980

    
2981

    
2982
class ExtStorageDevice(BlockDev):
2983
  """A block device provided by an ExtStorage Provider.
2984

2985
  This class implements the External Storage Interface, which means
2986
  handling of the externally provided block devices.
2987

2988
  """
2989
  def __init__(self, unique_id, children, size, params, *args):
2990
    """Attaches to an extstorage block device.
2991

2992
    """
2993
    super(ExtStorageDevice, self).__init__(unique_id, children, size, params,
2994
                                           *args)
2995

    
2996
    (self.name, self.uuid) = args
2997

    
2998
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
2999
      raise ValueError("Invalid configuration data %s" % str(unique_id))
3000

    
3001
    self.driver, self.vol_name = unique_id
3002
    self.ext_params = params
3003

    
3004
    self.major = self.minor = None
3005
    self.Attach()
3006

    
3007
  @classmethod
3008
  def Create(cls, unique_id, children, size, params, excl_stor, *args):
3009
    """Create a new extstorage device.
3010

3011
    Provision a new volume using an extstorage provider, which will
3012
    then be mapped to a block device.
3013

3014
    """
3015
    (name, uuid) = args
3016

    
3017
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
3018
      raise errors.ProgrammerError("Invalid configuration data %s" %
3019
                                   str(unique_id))
3020
    if excl_stor:
3021
      raise errors.ProgrammerError("extstorage device requested with"
3022
                                   " exclusive_storage")
3023

    
3024
    # Call the External Storage's create script,
3025
    # to provision a new Volume inside the External Storage
3026
    _ExtStorageAction(constants.ES_ACTION_CREATE, unique_id,
3027
                      params, size=str(size), name=name, uuid=uuid)
3028

    
3029
    return ExtStorageDevice(unique_id, children, size, params, name, uuid)
3030

    
3031
  def Remove(self):
3032
    """Remove the extstorage device.
3033

3034
    """
3035
    if not self.minor and not self.Attach():
3036
      # The extstorage device doesn't exist.
3037
      return
3038

    
3039
    # First shutdown the device (remove mappings).
3040
    self.Shutdown()
3041

    
3042
    # Call the External Storage's remove script,
3043
    # to remove the Volume from the External Storage
3044
    _ExtStorageAction(constants.ES_ACTION_REMOVE, self.unique_id,
3045
                      self.ext_params, name=self.name, uuid=self.uuid)
3046

    
3047
  def Rename(self, new_id):
3048
    """Rename this device.
3049

3050
    """
3051
    pass
3052

    
3053
  def Attach(self):
3054
    """Attach to an existing extstorage device.
3055

3056
    This method maps the extstorage volume that matches our name with
3057
    a corresponding block device and then attaches to this device.
3058

3059
    """
3060
    self.attached = False
3061

    
3062
    # Call the External Storage's attach script,
3063
    # to attach an existing Volume to a block device under /dev
3064
    self.dev_path = _ExtStorageAction(constants.ES_ACTION_ATTACH,
3065
                                      self.unique_id, self.ext_params,
3066
                                      name=self.name, uuid=self.uuid)
3067

    
3068
    try:
3069
      st = os.stat(self.dev_path)
3070
    except OSError, err:
3071
      logging.error("Error stat()'ing %s: %s", self.dev_path, str(err))
3072
      return False
3073

    
3074
    if not stat.S_ISBLK(st.st_mode):
3075
      logging.error("%s is not a block device", self.dev_path)
3076
      return False
3077

    
3078
    self.major = os.major(st.st_rdev)
3079
    self.minor = os.minor(st.st_rdev)
3080
    self.attached = True
3081

    
3082
    return True
3083

    
3084
  def Assemble(self):
3085
    """Assemble the device.
3086

3087
    """
3088
    pass
3089

    
3090
  def Shutdown(self):
3091
    """Shutdown the device.
3092

3093
    """
3094
    if not self.minor and not self.Attach():
3095
      # The extstorage device doesn't exist.
3096
      return
3097

    
3098
    # Call the External Storage's detach script,
3099
    # to detach an existing Volume from it's block device under /dev
3100
    _ExtStorageAction(constants.ES_ACTION_DETACH, self.unique_id,
3101
                      self.ext_params, name=self.name, uuid=self.uuid)
3102

    
3103
    self.minor = None
3104
    self.dev_path = None
3105

    
3106
  def Open(self, force=False):
3107
    """Make the device ready for I/O.
3108

3109
    """
3110
    pass
3111

    
3112
  def Close(self):
3113
    """Notifies that the device will no longer be used for I/O.
3114

3115
    """
3116
    pass
3117

    
3118
  def Grow(self, amount, dryrun, backingstore):
3119
    """Grow the Volume.
3120

3121
    @type amount: integer
3122
    @param amount: the amount (in mebibytes) to grow with
3123
    @type dryrun: boolean
3124
    @param dryrun: whether to execute the operation in simulation mode
3125
        only, without actually increasing the size
3126

3127
    """
3128
    if not backingstore:
3129
      return
3130
    if not self.Attach():
3131
      _ThrowError("Can't attach to extstorage device during Grow()")
3132

    
3133
    if dryrun:
3134
      # we do not support dry runs of resize operations for now.
3135
      return
3136

    
3137
    new_size = self.size + amount
3138

    
3139
    # Call the External Storage's grow script,
3140
    # to grow an existing Volume inside the External Storage
3141
    _ExtStorageAction(constants.ES_ACTION_GROW, self.unique_id,
3142
                      self.ext_params, size=str(self.size), grow=str(new_size),
3143
                      name=self.name, uuid=self.uuid)
3144

    
3145
  def SetInfo(self, text):
3146
    """Update metadata with info text.
3147

3148
    """
3149
    # Replace invalid characters
3150
    text = re.sub("^[^A-Za-z0-9_+.]", "_", text)
3151
    text = re.sub("[^-A-Za-z0-9_+.]", "_", text)
3152

    
3153
    # Only up to 128 characters are allowed
3154
    text = text[:128]
3155

    
3156
    # Call the External Storage's setinfo script,
3157
    # to set metadata for an existing Volume inside the External Storage
3158
    _ExtStorageAction(constants.ES_ACTION_SETINFO, self.unique_id,
3159
                      self.ext_params, metadata=text,
3160
                      name=self.name, uuid=self.uuid)
3161

    
3162
  def Snapshot(self, snapshot_name):
3163
    """Take a snapshot of the block device.
3164

3165
    """
3166
    # Call the External Storage's setinfo script,
3167
    # to set metadata for an existing Volume inside the External Storage
3168
    _ExtStorageAction(constants.ES_ACTION_SNAPSHOT, self.unique_id,
3169
                      self.ext_params, snapshot_name=snapshot_name,
3170
                      name=self.name, uuid=self.uuid)
3171

    
3172

    
3173
def _ExtStorageAction(action, unique_id, ext_params,
                      size=None, grow=None, metadata=None,
                      snapshot_name=None, name=None, uuid=None):
  """Take an External Storage action.

  Take an External Storage action concerning or affecting
  a specific Volume inside the External Storage.

  @type action: string
  @param action: which action to perform. One of:
                 create / remove / grow / attach / detach
  @type unique_id: tuple (driver, vol_name)
  @param unique_id: a tuple containing the type of ExtStorage (driver)
                    and the Volume name
  @type ext_params: dict
  @param ext_params: ExtStorage parameters
  @type size: integer
  @param size: the size of the Volume in mebibytes
  @type grow: integer
  @param grow: the new size in mebibytes (after grow)
  @type metadata: string
  @param metadata: metadata info of the Volume, for use by the provider
  @type snapshot_name: string
  @param snapshot_name: the name of the snapshot to take (snapshot action)
  @type name: string
  @param name: the human-readable name of the Volume
  @type uuid: string
  @param uuid: the uuid of the Volume
  @rtype: None or a block device path (during attach)

  """
  driver, vol_name = unique_id

  # Create an External Storage instance of type `driver'
  status, inst_es = ExtStorageFromDisk(driver)
  if not status:
    _ThrowError("%s" % inst_es)

  # Create the basic environment for the driver's scripts
  create_env = _ExtStorageEnvironment(unique_id, ext_params, size,
                                      grow, metadata, snapshot_name,
                                      name, uuid)

  # Do not use log file for action `attach' as we need
  # to get the output from RunResult
  # TODO: find a way to have a log file for attach too
  logfile = None
  # FIX: compare strings with != rather than `is not'; identity comparison
  # on strings only works by the accident of interning
  if action != constants.ES_ACTION_ATTACH:
    logfile = _VolumeLogName(action, driver, vol_name)

  # Make sure the given action results in a valid script
  if action not in constants.ES_SCRIPTS:
    _ThrowError("Action '%s' doesn't result in a valid ExtStorage script" %
                action)

  # Find out which external script to run according the given action
  script_name = action + "_script"
  script = getattr(inst_es, script_name)

  # Run the external script
  result = utils.RunCmd([script], env=create_env,
                        cwd=inst_es.path, output=logfile,)
  if result.failed:
    logging.error("External storage's %s command '%s' returned"
                  " error: %s, logfile: %s, output: %s",
                  action, result.cmd, result.fail_reason,
                  logfile, result.output)

    # If logfile is 'None' (during attach), it breaks TailFile
    # TODO: have a log file for attach too
    if action != constants.ES_ACTION_ATTACH:
      lines = [utils.SafeEncode(val)
               for val in utils.TailFile(logfile, lines=20)]
    else:
      lines = result.output[-20:]

    _ThrowError("External storage's %s script failed (%s), last"
                " lines of output:\n%s",
                action, result.fail_reason, "\n".join(lines))

  if action == constants.ES_ACTION_ATTACH:
    return result.stdout


def ExtStorageFromDisk(name, base_dir=None):
3252
  """Create an ExtStorage instance from disk.
3253

3254
  This function will return an ExtStorage instance
3255
  if the given name is a valid ExtStorage name.
3256

3257
  @type base_dir: string
3258
  @keyword base_dir: Base directory containing ExtStorage installations.
3259
                     Defaults to a search in all the ES_SEARCH_PATH dirs.
3260
  @rtype: tuple
3261
  @return: True and the ExtStorage instance if we find a valid one, or
3262
      False and the diagnose message on error
3263

3264
  """
3265
  if base_dir is None:
3266
    es_base_dir = pathutils.ES_SEARCH_PATH
3267
  else:
3268
    es_base_dir = [base_dir]
3269

    
3270
  es_dir = utils.FindFile(name, es_base_dir, os.path.isdir)
3271

    
3272
  if es_dir is None:
3273
    return False, ("Directory for External Storage Provider %s not"
3274
                   " found in search path" % name)
3275

    
3276
  # ES Files dictionary, we will populate it with the absolute path
3277
  # names; if the value is True, then it is a required file, otherwise
3278
  # an optional one
3279
  es_files = dict.fromkeys(constants.ES_SCRIPTS, True)
3280

    
3281
  es_files[constants.ES_PARAMETERS_FILE] = True
3282

    
3283
  for (filename, _) in es_files.items():
3284
    es_files[filename] = utils.PathJoin(es_dir, filename)
3285

    
3286
    try:
3287
      st = os.stat(es_files[filename])
3288
    except EnvironmentError, err:
3289
      return False, ("File '%s' under path '%s' is missing (%s)" %
3290
                     (filename, es_dir, utils.ErrnoOrStr(err)))
3291

    
3292
    if not stat.S_ISREG(stat.S_IFMT(st.st_mode)):
3293
      return False, ("File '%s' under path '%s' is not a regular file" %
3294
                     (filename, es_dir))
3295

    
3296
    if filename in constants.ES_SCRIPTS:
3297
      if stat.S_IMODE(st.st_mode) & stat.S_IXUSR != stat.S_IXUSR:
3298
        return False, ("File '%s' under path '%s' is not executable" %
3299
                       (filename, es_dir))
3300

    
3301
  parameters = []
3302
  if constants.ES_PARAMETERS_FILE in es_files:
3303
    parameters_file = es_files[constants.ES_PARAMETERS_FILE]
3304
    try:
3305
      parameters = utils.ReadFile(parameters_file).splitlines()
3306
    except EnvironmentError, err:
3307
      return False, ("Error while reading the EXT parameters file at %s: %s" %
3308
                     (parameters_file, utils.ErrnoOrStr(err)))
3309
    parameters = [v.split(None, 1) for v in parameters]
3310

    
3311
  es_obj = \
3312
    objects.ExtStorage(name=name, path=es_dir,
3313
                       create_script=es_files[constants.ES_SCRIPT_CREATE],
3314
                       remove_script=es_files[constants.ES_SCRIPT_REMOVE],
3315
                       grow_script=es_files[constants.ES_SCRIPT_GROW],
3316
                       attach_script=es_files[constants.ES_SCRIPT_ATTACH],
3317
                       detach_script=es_files[constants.ES_SCRIPT_DETACH],
3318
                       setinfo_script=es_files[constants.ES_SCRIPT_SETINFO],
3319
                       verify_script=es_files[constants.ES_SCRIPT_VERIFY],
3320
                       snapshot_script=es_files[constants.ES_SCRIPT_SNAPSHOT],
3321
                       supported_parameters=parameters)
3322
  return True, es_obj
3323

    
3324

    
3325
def _ExtStorageEnvironment(unique_id, ext_params,
3326
                           size=None, grow=None, metadata=None,
3327
                           snapshot_name=None, name=None, uuid=None):
3328
  """Calculate the environment for an External Storage script.
3329

3330
  @type unique_id: tuple (driver, vol_name)
3331
  @param unique_id: ExtStorage pool and name of the Volume
3332
  @type ext_params: dict
3333
  @param ext_params: the EXT parameters
3334
  @type size: string
3335
  @param size: size of the Volume (in mebibytes)
3336
  @type grow: string
3337
  @param grow: new size of Volume after grow (in mebibytes)
3338
  @type metadata: string
3339
  @param metadata: metadata info of the Volume
3340
  @rtype: dict
3341
  @return: dict of environment variables
3342

3343
  """
3344
  vol_name = unique_id[1]
3345

    
3346
  result = {}
3347
  result["VOL_NAME"] = vol_name
3348

    
3349
  # EXT params
3350
  for pname, pvalue in ext_params.items():
3351
    result["EXTP_%s" % pname.upper()] = str(pvalue)
3352

    
3353
  if size is not None:
3354
    result["VOL_SIZE"] = size
3355

    
3356
  if grow is not None:
3357
    result["VOL_NEW_SIZE"] = grow
3358

    
3359
  if metadata is not None:
3360
    result["VOL_METADATA"] = metadata
3361

    
3362
  if snapshot_name is not None:
3363
    result["VOL_SNAPSHOT_NAME"] = snapshot_name
3364

    
3365
  if name is not None:
3366
    result["VOL_CNAME"] = name
3367

    
3368
  if uuid is not None:
3369
    result["VOL_UUID"] = uuid
3370

    
3371
  return result
3372

    
3373

    
3374
def _VolumeLogName(kind, es_name, volume):
  """Compute the ExtStorage log filename for a given Volume and operation.

  @type kind: string
  @param kind: the operation type (e.g. create, remove etc.)
  @type es_name: string
  @param es_name: the ExtStorage name
  @type volume: string
  @param volume: the name of the Volume inside the External Storage

  """
  # Bail out early if the extstorage log dir is not a valid directory
  if not os.path.isdir(pathutils.LOG_ES_DIR):
    _ThrowError("Cannot find log directory: %s", pathutils.LOG_ES_DIR)

  # TODO: Use tempfile.mkstemp to create unique filename
  basename = "%s-%s-%s-%s.log" % (kind, es_name, volume,
                                  utils.TimestampForFilename())
  return utils.PathJoin(pathutils.LOG_ES_DIR, basename)
3393

    
3394

    
3395
# Mapping from logical disk type (constants.LD_*) to the BlockDev class
# implementing it; the factory functions below (FindDevice, Assemble,
# Create) use it to instantiate the right class for a given disk object.
DEV_MAP = {
  constants.LD_LV: LogicalVolume,
  constants.LD_DRBD8: DRBD8,
  constants.LD_BLOCKDEV: PersistentBlockDevice,
  constants.LD_RBD: RADOSBlockDevice,
  constants.LD_EXT: ExtStorageDevice,
  }

# File-based storage is only registered when enabled in the build
# configuration.
if constants.ENABLE_FILE_STORAGE or constants.ENABLE_SHARED_FILE_STORAGE:
  DEV_MAP[constants.LD_FILE] = FileStorage
3405

    
3406

    
3407
def _VerifyDiskType(dev_type):
  """Check that the given device type has a registered implementation.

  """
  if dev_type in DEV_MAP:
    return
  raise errors.ProgrammerError("Invalid block device type '%s'" % dev_type)
3410

    
3411

    
3412
def _VerifyDiskParams(disk):
  """Verifies if all disk parameters are set.

  """
  expected = set(constants.DISK_LD_DEFAULTS[disk.dev_type])
  missing = expected - set(disk.params)
  if missing:
    raise errors.ProgrammerError("Block device is missing disk parameters: %s" %
                                 missing)
3420

    
3421

    
3422
def FindDevice(disk, children):
  """Search for an existing, assembled device.

  This will succeed only if the device exists and is assembled, but it
  does not do any actions in order to activate the device.

  @type disk: L{objects.Disk}
  @param disk: the disk object to find
  @type children: list of L{bdev.BlockDev}
  @param children: the list of block devices that are children of the device
                  represented by the disk parameter

  """
  _VerifyDiskType(disk.dev_type)
  dev_class = DEV_MAP[disk.dev_type]
  device = dev_class(disk.physical_id, children, disk.size,
                     disk.params, disk.name, disk.uuid)
  if device.attached:
    return device
  return None
3441

    
3442

    
3443
def Assemble(disk, children):
  """Try to attach or assemble an existing device.

  This will attach to assemble the device, as needed, to bring it
  fully up. It must be safe to run on already-assembled devices.

  @type disk: L{objects.Disk}
  @param disk: the disk object to assemble
  @type children: list of L{bdev.BlockDev}
  @param children: the list of block devices that are children of the device
                  represented by the disk parameter

  """
  _VerifyDiskType(disk.dev_type)
  _VerifyDiskParams(disk)
  dev_class = DEV_MAP[disk.dev_type]
  device = dev_class(disk.physical_id, children, disk.size,
                     disk.params, disk.name, disk.uuid)
  device.Assemble()
  return device
3462

    
3463

    
3464
def Create(disk, children, excl_stor):
  """Create a device.

  @type disk: L{objects.Disk}
  @param disk: the disk object to create
  @type children: list of L{bdev.BlockDev}
  @param children: the list of block devices that are children of the device
                  represented by the disk parameter
  @type excl_stor: boolean
  @param excl_stor: Whether exclusive_storage is active

  """
  _VerifyDiskType(disk.dev_type)
  _VerifyDiskParams(disk)
  dev_class = DEV_MAP[disk.dev_type]
  return dev_class.Create(disk.physical_id, children, disk.size,
                          disk.params, excl_stor, disk.name, disk.uuid)