Statistics
| Branch: | Tag: | Revision:

root / lib / bdev.py @ b496abdb

History | View | Annotate | Download (107.2 kB)

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2010, 2011, 2012 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Block device abstraction"""
23

    
24
import re
25
import time
26
import errno
27
import shlex
28
import stat
29
import pyparsing as pyp
30
import os
31
import logging
32
import math
33

    
34
from ganeti import utils
35
from ganeti import errors
36
from ganeti import constants
37
from ganeti import objects
38
from ganeti import compat
39
from ganeti import netutils
40
from ganeti import pathutils
41

    
42

    
43
# Size of reads in _CanReadDevice
44
_DEVICE_READ_SIZE = 128 * 1024
45

    
46

    
47
def _IgnoreError(fn, *args, **kwargs):
48
  """Executes the given function, ignoring BlockDeviceErrors.
49

50
  This is used in order to simplify the execution of cleanup or
51
  rollback functions.
52

53
  @rtype: boolean
54
  @return: True when fn didn't raise an exception, False otherwise
55

56
  """
57
  try:
58
    fn(*args, **kwargs)
59
    return True
60
  except errors.BlockDeviceError, err:
61
    logging.warning("Caught BlockDeviceError but ignoring: %s", str(err))
62
    return False
63

    
64

    
65
def _ThrowError(msg, *args):
  """Log an error to the node daemon and then raise an exception.

  @type msg: string
  @param msg: the text of the exception; interpolated with C{args} when
      any are given
  @raise errors.BlockDeviceError: always

  """
  text = msg % args if args else msg
  logging.error(text)
  raise errors.BlockDeviceError(text)
77

    
78

    
79
def _CheckResult(result):
  """Throws an error if the given result is a failed one.

  @param result: result from RunCmd

  """
  if not result.failed:
    return
  _ThrowError("Command: %s error: %s - %s", result.cmd, result.fail_reason,
              result.output)
88

    
89

    
90
def _CanReadDevice(path):
  """Check if we can read from the given device.

  This tries to read the first 128k of the device.

  @type path: string
  @param path: path of the device to probe
  @rtype: boolean

  """
  try:
    utils.ReadFile(path, size=_DEVICE_READ_SIZE)
  except EnvironmentError:
    # log the failure (with traceback) but report it as a plain False
    logging.warning("Can't read from device %s", path, exc_info=True)
    return False
  return True
102

    
103

    
104
def _GetForbiddenFileStoragePaths():
  """Builds a list of path prefixes which shouldn't be used for file storage.

  @rtype: frozenset

  """
  # system directories that must never host instance disk files
  paths = set([
    "/boot",
    "/dev",
    "/etc",
    "/home",
    "/proc",
    "/root",
    "/sys",
    ])

  # also forbid the usual binary/library directories under /, /usr and
  # /usr/local
  for prefix in ["", "/usr", "/usr/local"]:
    for name in ["bin", "lib", "lib32", "lib64", "sbin"]:
      paths.add("%s/%s" % (prefix, name))

  return compat.UniqueFrozenset(map(os.path.normpath, paths))
125

    
126

    
127
def _ComputeWrongFileStoragePaths(paths,
                                  _forbidden=_GetForbiddenFileStoragePaths()):
  """Cross-checks a list of paths for prefixes considered bad.

  Some paths, e.g. "/bin", should not be used for file storage.

  @type paths: list
  @param paths: List of paths to be checked
  @rtype: list
  @return: Sorted list of paths for which the user should be warned

  """
  def _Check(path):
    # Previously the last clause used a bare "filter(...)" as a truth
    # value; under Python 3 filter() returns a lazy object which is always
    # truthy, so every path would be flagged. compat.any gives a real
    # boolean on both Python versions.
    return (not os.path.isabs(path) or
            path in _forbidden or
            compat.any(utils.IsBelowDir(p, path) for p in _forbidden))

  # materialize explicitly so NiceSort always receives a list
  return utils.NiceSort([p for p in map(os.path.normpath, paths)
                         if _Check(p)])
145

    
146

    
147
def ComputeWrongFileStoragePaths(_filename=pathutils.FILE_STORAGE_PATHS_FILE):
  """Returns a list of file storage paths whose prefix is considered bad.

  See L{_ComputeWrongFileStoragePaths}.

  @rtype: list
  @return: sorted list of offending configured paths

  """
  configured_paths = _LoadAllowedFileStoragePaths(_filename)
  return _ComputeWrongFileStoragePaths(configured_paths)
154

    
155

    
156
def _CheckFileStoragePath(path, allowed):
  """Checks if a path is in a list of allowed paths for file storage.

  @type path: string
  @param path: Path to check
  @type allowed: list
  @param allowed: List of allowed paths
  @raise errors.FileStoragePathError: If the path is not allowed

  """
  if not os.path.isabs(path):
    raise errors.FileStoragePathError("File storage path must be absolute,"
                                      " got '%s'" % path)

  # the path is acceptable as soon as it lives below one absolute entry of
  # the allowed list; relative entries are skipped with a log message
  found = False
  for candidate in allowed:
    if not os.path.isabs(candidate):
      logging.info("Ignoring relative path '%s' for file storage", candidate)
      continue

    if utils.IsBelowDir(candidate, path):
      found = True
      break

  if not found:
    raise errors.FileStoragePathError("Path '%s' is not acceptable for file"
                                      " storage" % path)
180

    
181

    
182
def _LoadAllowedFileStoragePaths(filename):
  """Loads file containing allowed file storage paths.

  @rtype: list
  @return: List of allowed paths (can be an empty list)

  """
  try:
    contents = utils.ReadFile(filename)
  except EnvironmentError:
    # a missing or unreadable file simply means no paths are allowed
    return []

  return utils.FilterEmptyLinesAndComments(contents)
195

    
196

    
197
def CheckFileStoragePath(path, _filename=pathutils.FILE_STORAGE_PATHS_FILE):
  """Checks if a path is allowed for file storage.

  @type path: string
  @param path: Path to check
  @raise errors.FileStoragePathError: If the path is not allowed

  """
  allowed_paths = _LoadAllowedFileStoragePaths(_filename)

  # first reject well-known bad prefixes, regardless of configuration
  wrong = _ComputeWrongFileStoragePaths([path])
  if wrong:
    raise errors.FileStoragePathError("Path '%s' uses a forbidden prefix" %
                                      path)

  # then verify the path against the configured whitelist
  _CheckFileStoragePath(path, allowed_paths)
212

    
213

    
214
class BlockDev(object):
  """Block device abstract class.

  A block device can be in the following states:
    - not existing on the system, and by `Create()` it goes into:
    - existing but not setup/not active, and by `Assemble()` goes into:
    - active read-write and by `Open()` it goes into
    - online (=used, or ready for use)

  A device can also be online but read-only, however we are not using
  the readonly state (LV has it, if needed in the future) and we are
  usually looking at this like at a stack, so it's easier to
  conceptualise the transition from not-existing to online and back
  like a linear one.

  The many different states of the device are due to the fact that we
  need to cover many device types:
    - logical volumes are created, lvchange -a y $lv, and used
    - drbd devices are attached to a local disk/remote peer and made primary

  A block device is identified by three items:
    - the /dev path of the device (dynamic)
    - a unique ID of the device (static)
    - its major/minor pair (dynamic)

  Not all devices implement both the first two as distinct items. LVM
  logical volumes have their unique ID (the pair volume group, logical
  volume name) in a 1-to-1 relation to the dev path. For DRBD devices,
  the /dev path is again dynamic and the unique id is the pair (host1,
  dev1), (host2, dev2).

  You can get to a device in two ways:
    - creating the (real) device, which returns you
      an attached instance (lvcreate)
    - attaching of a python instance to an existing (real) device

  The second point, the attachment to a device, is different
  depending on whether the device is assembled or not. At init() time,
  we search for a device with the same unique_id as us. If found,
  good. It also means that the device is already assembled. If not,
  after assembly we'll have our correct major/minor.

  """
  def __init__(self, unique_id, children, size, params):
    self._children = children
    self.dev_path = None
    self.unique_id = unique_id
    self.major = None
    self.minor = None
    self.attached = False
    self.size = size
    self.params = params

  def Assemble(self):
    """Assemble the device from its components.

    Implementations of this method by child classes must ensure that:
      - after the device has been assembled, it knows its major/minor
        numbers; this allows other devices (usually parents) to probe
        correctly for their children
      - calling this method on an existing, in-use device is safe
      - if the device is already configured (and in an OK state),
        this method is idempotent

    """
    pass

  def Attach(self):
    """Find a device which matches our config and attach to it.

    """
    raise NotImplementedError

  def Close(self):
    """Notifies that the device will no longer be used for I/O.

    """
    raise NotImplementedError

  @classmethod
  def Create(cls, unique_id, children, size, params, excl_stor):
    """Create the device.

    If the device cannot be created, it will return None
    instead. Error messages go to the logging system.

    Note that for some devices, the unique_id is used, and for other,
    the children. The idea is that these two, taken together, are
    enough for both creation and assembly (later).

    """
    raise NotImplementedError

  def Remove(self):
    """Remove this device.

    This makes sense only for some of the device types: LV and file
    storage. Also note that if the device can't attach, the removal
    can't be completed.

    """
    raise NotImplementedError

  def Rename(self, new_id):
    """Rename this device.

    This may or may not make sense for a given device type.

    """
    raise NotImplementedError

  def Open(self, force=False):
    """Make the device ready for use.

    This makes the device ready for I/O. For now, just the DRBD
    devices need this.

    The force parameter signifies that if the device has any kind of
    --force thing, it should be used, we know what we are doing.

    """
    raise NotImplementedError

  def Shutdown(self):
    """Shut down the device, freeing its children.

    This undoes the `Assemble()` work, except for the child
    assembling; as such, the children on the device are still
    assembled after this call.

    """
    raise NotImplementedError

  def SetSyncParams(self, params):
    """Adjust the synchronization parameters of the mirror.

    In case this is not a mirroring device, this is no-op.

    @param params: dictionary of LD level disk parameters related to the
    synchronization.
    @rtype: list
    @return: a list of error messages, emitted both by the current node and by
    children. An empty list means no errors.

    """
    result = []
    if self._children:
      for child in self._children:
        result.extend(child.SetSyncParams(params))
    return result

  def PauseResumeSync(self, pause):
    """Pause/Resume the sync of the mirror.

    In case this is not a mirroring device, this is no-op.

    @param pause: Whether to pause or resume

    """
    result = True
    if self._children:
      for child in self._children:
        result = result and child.PauseResumeSync(pause)
    return result

  def GetSyncStatus(self):
    """Returns the sync status of the device.

    If this device is a mirroring device, this function returns the
    status of the mirror.

    If sync_percent is None, it means the device is not syncing.

    If estimated_time is None, it means we can't estimate
    the time needed, otherwise it's the time left in seconds.

    If is_degraded is True, it means the device is missing
    redundancy. This is usually a sign that something went wrong in
    the device setup, if sync_percent is None.

    The ldisk parameter represents the degradation of the local
    data. This is only valid for some devices, the rest will always
    return False (not degraded).

    @rtype: objects.BlockDevStatus

    """
    return objects.BlockDevStatus(dev_path=self.dev_path,
                                  major=self.major,
                                  minor=self.minor,
                                  sync_percent=None,
                                  estimated_time=None,
                                  is_degraded=False,
                                  ldisk_status=constants.LDS_OKAY)

  def CombinedSyncStatus(self):
    """Calculate the mirror status recursively for our children.

    The return value is the same as for `GetSyncStatus()` except the
    minimum percent and maximum time are calculated across our
    children.

    @rtype: objects.BlockDevStatus

    """
    status = self.GetSyncStatus()

    min_percent = status.sync_percent
    max_time = status.estimated_time
    is_degraded = status.is_degraded
    ldisk_status = status.ldisk_status

    if self._children:
      for child in self._children:
        child_status = child.GetSyncStatus()

        if min_percent is None:
          min_percent = child_status.sync_percent
        elif child_status.sync_percent is not None:
          min_percent = min(min_percent, child_status.sync_percent)

        if max_time is None:
          max_time = child_status.estimated_time
        elif child_status.estimated_time is not None:
          max_time = max(max_time, child_status.estimated_time)

        is_degraded = is_degraded or child_status.is_degraded

        if ldisk_status is None:
          ldisk_status = child_status.ldisk_status
        elif child_status.ldisk_status is not None:
          ldisk_status = max(ldisk_status, child_status.ldisk_status)

    return objects.BlockDevStatus(dev_path=self.dev_path,
                                  major=self.major,
                                  minor=self.minor,
                                  sync_percent=min_percent,
                                  estimated_time=max_time,
                                  is_degraded=is_degraded,
                                  ldisk_status=ldisk_status)

  def SetInfo(self, text):
    """Update metadata with info text.

    Only supported for some device types.

    """
    # Guard against a missing/None child list, consistently with
    # SetSyncParams and PauseResumeSync above
    if self._children:
      for child in self._children:
        child.SetInfo(text)

  def Grow(self, amount, dryrun, backingstore):
    """Grow the block device.

    @type amount: integer
    @param amount: the amount (in mebibytes) to grow with
    @type dryrun: boolean
    @param dryrun: whether to execute the operation in simulation mode
        only, without actually increasing the size
    @param backingstore: whether to execute the operation on backing storage
        only, or on "logical" storage only; e.g. DRBD is logical storage,
        whereas LVM, file, RBD are backing storage

    """
    raise NotImplementedError

  def GetActualSize(self):
    """Return the actual disk size.

    @note: the device needs to be active when this is called

    """
    assert self.attached, "BlockDevice not attached in GetActualSize()"
    result = utils.RunCmd(["blockdev", "--getsize64", self.dev_path])
    if result.failed:
      _ThrowError("blockdev failed (%s): %s",
                  result.fail_reason, result.output)
    try:
      sz = int(result.output.strip())
    except (ValueError, TypeError) as err:
      _ThrowError("Failed to parse blockdev output: %s", str(err))
    return sz

  def __repr__(self):
    return ("<%s: unique_id: %s, children: %s, %s:%s, %s>" %
            (self.__class__, self.unique_id, self._children,
             self.major, self.minor, self.dev_path))
500

    
501

    
502
class LogicalVolume(BlockDev):
  """Logical Volume block device.

  The unique_id of an LV is the (vg_name, lv_name) pair.

  """
  # Characters accepted in VG/LV names (both kinds are validated with
  # the same, combined restrictions)
  _VALID_NAME_RE = re.compile("^[a-zA-Z0-9+_.-]*$")
  # Names reserved by LVM and therefore rejected outright
  _INVALID_NAMES = compat.UniqueFrozenset([".", "..", "snapshot", "pvmove"])
  # Substrings reserved for LVM-internal volumes (mirror log/image LVs)
  _INVALID_SUBSTRINGS = compat.UniqueFrozenset(["_mlog", "_mimage"])
509

    
510
  def __init__(self, unique_id, children, size, params):
    """Attaches to a LV device.

    The unique_id is a tuple (vg_name, lv_name)

    @raise ValueError: if unique_id is not a two-element tuple/list

    """
    super(LogicalVolume, self).__init__(unique_id, children, size, params)
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
      raise ValueError("Invalid configuration data %s" % str(unique_id))
    (self._vg_name, self._lv_name) = unique_id
    for name in (self._vg_name, self._lv_name):
      self._ValidateName(name)
    self.dev_path = utils.PathJoin("/dev", self._vg_name, self._lv_name)
    # assume degraded until Attach() proves otherwise
    self._degraded = True
    self.major = None
    self.minor = None
    self.pe_size = None
    self.stripe_count = None
    self.Attach()
526

    
527
  @staticmethod
  def _GetStdPvSize(pvs_info):
    """Return the standard PV size (used with exclusive storage).

    @param pvs_info: list of objects.LvmPvInfo, cannot be empty
    @rtype: float
    @return: size in MiB

    """
    assert len(pvs_info) > 0
    # base the standard size on the smallest PV, leaving room for the
    # partition margin and reserved space
    smallest = min(pv.size for pv in pvs_info)
    return smallest / (1 + constants.PART_MARGIN + constants.PART_RESERVED)
539

    
540
  @staticmethod
  def _ComputeNumPvs(size, pvs_info):
    """Compute the number of PVs needed for an LV (with exclusive storage).

    @type size: float
    @param size: LV size in MiB
    @param pvs_info: list of objects.LvmPvInfo, cannot be empty
    @rtype: integer
    @return: number of PVs needed

    """
    assert len(pvs_info) > 0
    # one "standard" PV at a time, rounded up
    needed = float(size) / float(LogicalVolume._GetStdPvSize(pvs_info))
    return int(math.ceil(needed))
553

    
554
  @staticmethod
  def _GetEmptyPvNames(pvs_info, max_pvs=None):
    """Return a list of empty PVs, by name.

    @param pvs_info: list of objects.LvmPvInfo
    @type max_pvs: integer
    @param max_pvs: if not None, maximum number of names to return
    @rtype: list
    @return: names of the empty PVs, at most max_pvs of them

    """
    # List comprehensions instead of filter()/map(): under Python 3 those
    # return lazy objects, which would break the slicing below and len()
    # in callers; on Python 2 the behavior is identical.
    empty_pvs = [pv for pv in pvs_info if objects.LvmPvInfo.IsEmpty(pv)]
    if max_pvs is not None:
      empty_pvs = empty_pvs[:max_pvs]
    return [pv.name for pv in empty_pvs]
563

    
564
  @classmethod
  def Create(cls, unique_id, children, size, params, excl_stor):
    """Create a new logical volume.

    @type unique_id: tuple
    @param unique_id: the (vg_name, lv_name) pair of the volume to create
    @param children: passed through to the resulting LogicalVolume object
    @type size: integer
    @param size: volume size in MiB
    @param params: dictionary of LD level disk parameters (stripe count
        is read from it)
    @type excl_stor: boolean
    @param excl_stor: whether exclusive storage is enabled (the LV then
        gets whole, empty PVs assigned to it)
    @rtype: LogicalVolume
    @raise errors.ProgrammerError: if unique_id is malformed
    @raise errors.BlockDeviceError: if the volume cannot be created

    """
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
      raise errors.ProgrammerError("Invalid configuration data %s" %
                                   str(unique_id))
    vg_name, lv_name = unique_id
    cls._ValidateName(vg_name)
    cls._ValidateName(lv_name)
    pvs_info = cls.GetPVInfo([vg_name])
    if not pvs_info:
      if excl_stor:
        msg = "No (empty) PVs found"
      else:
        msg = "Can't compute PV info for vg %s" % vg_name
      _ThrowError(msg)
    # prefer the PVs with the most free space
    pvs_info.sort(key=(lambda pv: pv.free), reverse=True)

    pvlist = [pv.name for pv in pvs_info]
    if compat.any(":" in v for v in pvlist):
      _ThrowError("Some of your PVs have the invalid character ':' in their"
                  " name, this is not supported - please filter them out"
                  " in lvm.conf using either 'filter' or 'preferred_names'")

    current_pvs = len(pvlist)
    desired_stripes = params[constants.LDP_STRIPES]
    # cannot stripe wider than the number of PVs available
    stripes = min(current_pvs, desired_stripes)

    if excl_stor:
      # exclusive storage: the LV gets whole, empty PVs for itself
      (err_msgs, _) = utils.LvmExclusiveCheckNodePvs(pvs_info)
      if err_msgs:
        for m in err_msgs:
          logging.warning(m)
      req_pvs = cls._ComputeNumPvs(size, pvs_info)
      pvlist = cls._GetEmptyPvNames(pvs_info, req_pvs)
      current_pvs = len(pvlist)
      if current_pvs < req_pvs:
        _ThrowError("Not enough empty PVs to create a disk of %d MB:"
                    " %d available, %d needed", size, current_pvs, req_pvs)
      assert current_pvs == len(pvlist)
      if stripes > current_pvs:
        # No warning issued for this, as it's no surprise
        stripes = current_pvs

    else:
      if stripes < desired_stripes:
        logging.warning("Could not use %d stripes for VG %s, as only %d PVs are"
                        " available.", desired_stripes, vg_name, current_pvs)
      free_size = sum([pv.free for pv in pvs_info])
      # The size constraint should have been checked from the master before
      # calling the create function.
      if free_size < size:
        _ThrowError("Not enough free space: required %s,"
                    " available %s", size, free_size)

    # If the free space is not well distributed, we won't be able to
    # create an optimally-striped volume; in that case, we want to try
    # with N, N-1, ..., 2, and finally 1 (non-stripped) number of
    # stripes
    # NOTE(review): if stripes could ever be 0 here, "result" below would
    # be unbound; this appears prevented by the non-empty PV checks above,
    # but confirm for the excl_stor path with size == 0.
    cmd = ["lvcreate", "-L%dm" % size, "-n%s" % lv_name]
    for stripes_arg in range(stripes, 0, -1):
      result = utils.RunCmd(cmd + ["-i%d" % stripes_arg] + [vg_name] + pvlist)
      if not result.failed:
        break
    if result.failed:
      _ThrowError("LV create failed (%s): %s",
                  result.fail_reason, result.output)
    return LogicalVolume(unique_id, children, size, params)
634

    
635
  @staticmethod
  def _GetVolumeInfo(lvm_cmd, fields):
    """Returns LVM volume information using lvm_cmd

    Runs the given LVM reporting command with machine-readable output
    (no headings, no unit suffix, sizes in MiB, "|"-separated fields).

    @param lvm_cmd: Should be one of "pvs", "vgs" or "lvs"
    @param fields: Fields to return
    @return: A list of dicts each with the parsed fields
    @raise errors.ProgrammerError: if no fields were requested
    @raise errors.CommandError: if the command fails or its output
        cannot be parsed

    """
    if not fields:
      raise errors.ProgrammerError("No fields specified")

    sep = "|"
    cmd = [lvm_cmd, "--noheadings", "--nosuffix", "--units=m", "--unbuffered",
           "--separator=%s" % sep, "-o%s" % ",".join(fields)]

    result = utils.RunCmd(cmd)
    if result.failed:
      raise errors.CommandError("Can't get the volume information: %s - %s" %
                                (result.fail_reason, result.output))

    data = []
    for line in result.stdout.splitlines():
      splitted_fields = line.strip().split(sep)

      # every requested field must be present on every line
      if len(fields) != len(splitted_fields):
        raise errors.CommandError("Can't parse %s output: line '%s'" %
                                  (lvm_cmd, line))

      data.append(splitted_fields)

    return data
667

    
668
  @classmethod
  def GetPVInfo(cls, vg_names, filter_allocatable=True, include_lvs=False):
    """Get the free space info for PVs in a volume group.

    @param vg_names: list of volume group names, if empty all will be returned
    @param filter_allocatable: whether to skip over unallocatable PVs
    @param include_lvs: whether to include a list of LVs hosted on each PV

    @rtype: list
    @return: list of objects.LvmPvInfo objects; None if the "pvs"
        command failed (the error is logged)

    """
    # We request "lv_name" field only if we care about LVs, so we don't get
    # a long list of entries with many duplicates unless we really have to.
    # The duplicate "pv_name" field will be ignored.
    if include_lvs:
      lvfield = "lv_name"
    else:
      lvfield = "pv_name"
    try:
      info = cls._GetVolumeInfo("pvs", ["pv_name", "vg_name", "pv_free",
                                        "pv_attr", "pv_size", lvfield])
    except errors.GenericError, err:
      logging.error("Can't get PV information: %s", err)
      return None

    # When asked for LVs, "pvs" may return multiple entries for the same PV-LV
    # pair. We sort entries by PV name and then LV name, so it's easy to weed
    # out duplicates.
    if include_lvs:
      info.sort(key=(lambda i: (i[0], i[5])))
    data = []
    lastpvi = None
    for (pv_name, vg_name, pv_free, pv_attr, pv_size, lv_name) in info:
      # (possibly) skip over pvs which are not allocatable
      if filter_allocatable and pv_attr[0] != "a":
        continue
      # (possibly) skip over pvs which are not in the right volume group(s)
      if vg_names and vg_name not in vg_names:
        continue
      # Beware of duplicates (check before inserting)
      if lastpvi and lastpvi.name == pv_name:
        # same PV as the previous entry: only collect the extra LV name
        # (entries are sorted, so equal LV names are adjacent)
        if include_lvs and lv_name:
          if not lastpvi.lv_list or lastpvi.lv_list[-1] != lv_name:
            lastpvi.lv_list.append(lv_name)
      else:
        if include_lvs and lv_name:
          lvl = [lv_name]
        else:
          lvl = []
        lastpvi = objects.LvmPvInfo(name=pv_name, vg_name=vg_name,
                                    size=float(pv_size), free=float(pv_free),
                                    attributes=pv_attr, lv_list=lvl)
        data.append(lastpvi)

    return data
724

    
725
  @classmethod
  def _GetExclusiveStorageVgFree(cls, vg_name):
    """Return the free disk space in the given VG, in exclusive storage mode.

    In this mode only whole, empty PVs count as free space.

    @type vg_name: string
    @param vg_name: VG name
    @rtype: float
    @return: free space in MiB

    """
    pvs_info = cls.GetPVInfo([vg_name])
    if not pvs_info:
      return 0.0
    empty_count = len(cls._GetEmptyPvNames(pvs_info))
    return cls._GetStdPvSize(pvs_info) * empty_count
740

    
741
  @classmethod
  def GetVGInfo(cls, vg_names, excl_stor, filter_readonly=True):
    """Get the free space info for specific VGs.

    @param vg_names: list of volume group names, if empty all will be returned
    @param excl_stor: whether exclusive_storage is enabled
    @param filter_readonly: whether to skip over readonly VGs

    @rtype: list
    @return: list of tuples (free_space, total_size, name) with free_space in
             MiB; None if the "vgs" command failed (the error is logged)

    """
    try:
      info = cls._GetVolumeInfo("vgs", ["vg_name", "vg_free", "vg_attr",
                                        "vg_size"])
    except errors.GenericError as err:
      logging.error("Can't get VG information: %s", err)
      return None

    data = []
    for vg_name, vg_free, vg_attr, vg_size in info:
      # (possibly) skip over vgs which are not writable
      if filter_readonly and vg_attr[0] == "r":
        continue
      # (possibly) skip over vgs which are not in the right volume group(s)
      if vg_names and vg_name not in vg_names:
        continue
      # "vgs" reports sizes as strings; convert before the numeric
      # comparison below (previously the assert compared a float against
      # the raw string, which was vacuously true on Python 2 and would be
      # a TypeError on Python 3)
      vg_free = float(vg_free)
      # Exclusive storage needs a different concept of free space
      if excl_stor:
        es_free = cls._GetExclusiveStorageVgFree(vg_name)
        assert es_free <= vg_free
        vg_free = es_free
      data.append((vg_free, float(vg_size), vg_name))

    return data
777

    
778
  @classmethod
  def _ValidateName(cls, name):
    """Validates that a given name is valid as VG or LV name.

    The list of valid characters and restricted names is taken out of
    the lvm(8) manpage, with the simplification that we enforce both
    VG and LV restrictions on the names.

    @raise errors.BlockDeviceError: if the name is not acceptable

    """
    valid = (cls._VALID_NAME_RE.match(name) and
             name not in cls._INVALID_NAMES and
             not compat.any(substring in name
                            for substring in cls._INVALID_SUBSTRINGS))
    if not valid:
      _ThrowError("Invalid LVM name '%s'", name)
791

    
792
  def Remove(self):
    """Remove this logical volume.

    Attaching first makes sure the volume actually exists; a missing
    volume is treated as already removed.

    """
    if not self.minor and not self.Attach():
      # the LV does not exist
      return
    lv_spec = "%s/%s" % (self._vg_name, self._lv_name)
    result = utils.RunCmd(["lvremove", "-f", lv_spec])
    if result.failed:
      _ThrowError("Can't lvremove: %s - %s", result.fail_reason, result.output)
803

    
804
  def Rename(self, new_id):
    """Rename this logical volume.

    @type new_id: tuple
    @param new_id: the new (vg_name, lv_name) pair; the volume group
        must match the current one
    @raise errors.ProgrammerError: if new_id is malformed or names a
        different volume group
    @raise errors.BlockDeviceError: if lvrename fails

    """
    if not isinstance(new_id, (tuple, list)) or len(new_id) != 2:
      raise errors.ProgrammerError("Invalid new logical id '%s'" % new_id)
    new_vg, new_name = new_id
    if new_vg != self._vg_name:
      # fixed duplicated word ("to to") in the error message
      raise errors.ProgrammerError("Can't move a logical volume across"
                                   " volume groups (from %s to %s)" %
                                   (self._vg_name, new_vg))
    result = utils.RunCmd(["lvrename", new_vg, self._lv_name, new_name])
    if result.failed:
      _ThrowError("Failed to rename the logical volume: %s", result.output)
    self._lv_name = new_name
    self.dev_path = utils.PathJoin("/dev", self._vg_name, self._lv_name)
820

    
821
  def Attach(self):
    """Attach to an existing LV.

    This method will try to see if an existing and active LV exists
    which matches our name. If so, its major/minor will be
    recorded.

    @rtype: boolean
    @return: True if the LV was found and its attributes parsed,
        False otherwise (details are logged)

    """
    self.attached = False
    result = utils.RunCmd(["lvs", "--noheadings", "--separator=,",
                           "--units=m", "--nosuffix",
                           "-olv_attr,lv_kernel_major,lv_kernel_minor,"
                           "vg_extent_size,stripes", self.dev_path])
    if result.failed:
      logging.error("Can't find LV %s: %s, %s",
                    self.dev_path, result.fail_reason, result.output)
      return False
    # the output can (and will) have multiple lines for multi-segment
    # LVs, as the 'stripes' parameter is a segment one, so we take
    # only the last entry, which is the one we're interested in; note
    # that with LVM2 anyway the 'stripes' value must be constant
    # across segments, so this is a no-op actually
    out = result.stdout.splitlines()
    if not out: # totally empty result? splitlines() returns at least
                # one line for any non-empty string
      logging.error("Can't parse LVS output, no lines? Got '%s'", str(out))
      return False
    out = out[-1].strip().rstrip(",")
    out = out.split(",")
    if len(out) != 5:
      logging.error("Can't parse LVS output, len(%s) != 5", str(out))
      return False

    status, major, minor, pe_size, stripes = out
    if len(status) < 6:
      logging.error("lvs lv_attr is not at least 6 characters (%s)", status)
      return False

    # NOTE(review): unlike the pe_size/stripes parsing below, a failure
    # here only logs and falls through with the unparsed string values --
    # confirm whether this is intentional
    try:
      major = int(major)
      minor = int(minor)
    except (TypeError, ValueError), err:
      logging.error("lvs major/minor cannot be parsed: %s", str(err))

    try:
      pe_size = int(float(pe_size))
    except (TypeError, ValueError), err:
      logging.error("Can't parse vg extent size: %s", err)
      return False

    try:
      stripes = int(stripes)
    except (TypeError, ValueError), err:
      logging.error("Can't parse the number of stripes: %s", err)
      return False

    self.major = major
    self.minor = minor
    self.pe_size = pe_size
    self.stripe_count = stripes
    self._degraded = status[0] == "v" # virtual volume, i.e. has no
                                      # backing storage
    self.attached = True
    return True
885

    
886
  def Assemble(self):
    """Assemble the device.

    The LV is always (re-)activated via `lvchange -ay`, since there
    have been cases where xenvg was not active after a reboot (and
    possibly after disk problems).

    """
    activation = utils.RunCmd(["lvchange", "-ay", self.dev_path])
    if activation.failed:
      _ThrowError("Can't activate lv %s: %s", self.dev_path,
                  activation.output)
897

    
898
  def Shutdown(self):
    """Shutdown the device.

    Logical volumes are not deactivated on shutdown, so this method
    intentionally does nothing.

    """
906

    
907
  def GetSyncStatus(self):
    """Returns the sync status of the device.

    If this device is a mirroring device, this function returns the
    status of the mirror.

    For logical volumes, sync_percent and estimated_time are always
    None (no recovery in progress, as we don't handle the mirrored LV
    case). The is_degraded parameter is the inverse of the ldisk
    parameter.

    For the ldisk parameter, we check if the logical volume has the
    'virtual' type, which means it's not backed by existing storage
    anymore (read from it return I/O error). This happens after a
    physical disk failure and subsequent 'vgreduce --removemissing' on
    the volume group.

    The status was already read in Attach, so we just return it.

    @rtype: objects.BlockDevStatus

    """
    # _degraded was determined from the lv_attr field during Attach()
    ldisk_status = (constants.LDS_FAULTY if self._degraded
                    else constants.LDS_OKAY)

    return objects.BlockDevStatus(dev_path=self.dev_path,
                                  major=self.major,
                                  minor=self.minor,
                                  sync_percent=None,
                                  estimated_time=None,
                                  is_degraded=self._degraded,
                                  ldisk_status=ldisk_status)
941

    
942
  def Open(self, force=False):
    """Make the device ready for I/O.

    Nothing to do for logical volumes, which are always usable once
    active.

    """
949

    
950
  def Close(self):
    """Notifies that the device will no longer be used for I/O.

    Nothing to do for logical volumes; the device stays active.

    """
957

    
958
  def Snapshot(self, size):
    """Create a snapshot copy of an lvm block device.

    @returns: tuple (vg, lv)

    """
    snap_name = self._lv_name + ".snap"

    # drop any stale snapshot with the same name; failures are
    # deliberately ignored (there may simply be nothing to remove)
    stale_snap = LogicalVolume((self._vg_name, snap_name), None, size,
                               self.params)
    _IgnoreError(stale_snap.Remove)

    # verify there is enough free space in the VG for the snapshot
    vg_info = self.GetVGInfo([self._vg_name], False)
    if not vg_info:
      _ThrowError("Can't compute VG info for vg %s", self._vg_name)
    vg_free = vg_info[0][0]
    if vg_free < size:
      _ThrowError("Not enough free space: required %s,"
                  " available %s", size, vg_free)

    _CheckResult(utils.RunCmd(["lvcreate", "-L%dm" % size, "-s",
                               "-n%s" % snap_name, self.dev_path]))

    return (self._vg_name, snap_name)
982

    
983
  def _RemoveOldInfo(self):
    """Try to remove old tags from the lv.

    """
    lvs_result = utils.RunCmd(["lvs", "-o", "tags", "--noheadings",
                               "--nosuffix", self.dev_path])
    _CheckResult(lvs_result)

    current_tags = lvs_result.stdout.strip()
    if not current_tags:
      return
    # remove each existing tag individually
    for old_tag in current_tags.split(","):
      _CheckResult(utils.RunCmd(["lvchange", "--deltag",
                                 old_tag.strip(), self.dev_path]))
996

    
997
  def SetInfo(self, text):
    """Update metadata with info text.

    """
    BlockDev.SetInfo(self, text)

    self._RemoveOldInfo()

    # sanitize for LVM's allowed tag character set: the first character
    # has a stricter rule than the rest (no leading hyphen)
    text = re.sub("^[^A-Za-z0-9_+.]", "_", text)
    text = re.sub("[^-A-Za-z0-9_+.]", "_", text)

    # LVM tags are limited to 128 characters
    text = text[:128]

    _CheckResult(utils.RunCmd(["lvchange", "--addtag", text, self.dev_path]))
1013

    
1014
  def Grow(self, amount, dryrun, backingstore):
    """Grow the logical volume.

    """
    if not backingstore:
      return
    if self.pe_size is None or self.stripe_count is None:
      if not self.Attach():
        _ThrowError("Can't attach to LV during Grow()")
    # round the amount up to a multiple of the full stripe size, so
    # that the extension is evenly striped
    full_stripe_size = self.pe_size * self.stripe_count
    partial = amount % full_stripe_size
    if partial:
      amount += full_stripe_size - partial
    cmd = ["lvextend", "-L", "+%dm" % amount]
    if dryrun:
      cmd.append("--test")
    # we try multiple algorithms since the 'best' ones might not have
    # space available in the right place, but later ones might (since
    # they have less constraints); also note that only recent LVM
    # supports 'cling'
    for alloc_policy in ("contiguous", "cling", "normal"):
      result = utils.RunCmd(cmd + ["--alloc", alloc_policy, self.dev_path])
      if not result.failed:
        return
    _ThrowError("Can't grow LV %s: %s", self.dev_path, result.output)
1039

    
1040

    
1041
class DRBD8Status(object):
  """A DRBD status representation class.

  Note that this doesn't support unconfigured devices (cs:Unconfigured).

  """
  UNCONF_RE = re.compile(r"\s*[0-9]+:\s*cs:Unconfigured$")
  LINE_RE = re.compile(r"\s*[0-9]+:\s*cs:(\S+)\s+(?:st|ro):([^/]+)/(\S+)"
                       r"\s+ds:([^/]+)/(\S+)\s+.*$")
  SYNC_RE = re.compile(r"^.*\ssync'ed:\s*([0-9.]+)%.*"
                       # Due to a bug in drbd in the kernel, introduced in
                       # commit 4b0715f096 (still unfixed as of 2011-08-22)
                       r"(?:\s|M)"
                       r"finish: ([0-9]+):([0-9]+):([0-9]+)\s.*$")

  # connection states
  CS_UNCONFIGURED = "Unconfigured"
  CS_STANDALONE = "StandAlone"
  CS_WFCONNECTION = "WFConnection"
  CS_WFREPORTPARAMS = "WFReportParams"
  CS_CONNECTED = "Connected"
  CS_STARTINGSYNCS = "StartingSyncS"
  CS_STARTINGSYNCT = "StartingSyncT"
  CS_WFBITMAPS = "WFBitMapS"
  CS_WFBITMAPT = "WFBitMapT"
  CS_WFSYNCUUID = "WFSyncUUID"
  CS_SYNCSOURCE = "SyncSource"
  CS_SYNCTARGET = "SyncTarget"
  CS_PAUSEDSYNCS = "PausedSyncS"
  CS_PAUSEDSYNCT = "PausedSyncT"
  # connection states that imply a resync is in progress
  CSET_SYNC = compat.UniqueFrozenset([
    CS_WFREPORTPARAMS,
    CS_STARTINGSYNCS,
    CS_STARTINGSYNCT,
    CS_WFBITMAPS,
    CS_WFBITMAPT,
    CS_WFSYNCUUID,
    CS_SYNCSOURCE,
    CS_SYNCTARGET,
    CS_PAUSEDSYNCS,
    CS_PAUSEDSYNCT,
    ])

  # disk states
  DS_DISKLESS = "Diskless"
  DS_ATTACHING = "Attaching" # transient state
  DS_FAILED = "Failed" # transient state, next: diskless
  DS_NEGOTIATING = "Negotiating" # transient state
  DS_INCONSISTENT = "Inconsistent" # while syncing or after creation
  DS_OUTDATED = "Outdated"
  DS_DUNKNOWN = "DUnknown" # shown for peer disk when not connected
  DS_CONSISTENT = "Consistent"
  DS_UPTODATE = "UpToDate" # normal state

  # node roles
  RO_PRIMARY = "Primary"
  RO_SECONDARY = "Secondary"
  RO_UNKNOWN = "Unknown"

  def __init__(self, procline):
    unconf_match = self.UNCONF_RE.match(procline)
    if unconf_match:
      self.cstatus = self.CS_UNCONFIGURED
      self.lrole = self.rrole = self.ldisk = self.rdisk = None
    else:
      line_match = self.LINE_RE.match(procline)
      if not line_match:
        raise errors.BlockDeviceError("Can't parse input data '%s'" % procline)
      (self.cstatus, self.lrole, self.rrole,
       self.ldisk, self.rdisk) = line_match.groups()

    # derived convenience booleans, computed from the raw fields above

    self.is_standalone = self.cstatus == self.CS_STANDALONE
    self.is_wfconn = self.cstatus == self.CS_WFCONNECTION
    self.is_connected = self.cstatus == self.CS_CONNECTED
    self.is_primary = self.lrole == self.RO_PRIMARY
    self.is_secondary = self.lrole == self.RO_SECONDARY
    self.peer_primary = self.rrole == self.RO_PRIMARY
    self.peer_secondary = self.rrole == self.RO_SECONDARY
    self.both_primary = self.is_primary and self.peer_primary
    self.both_secondary = self.is_secondary and self.peer_secondary

    self.is_diskless = self.ldisk == self.DS_DISKLESS
    self.is_disk_uptodate = self.ldisk == self.DS_UPTODATE

    self.is_in_resync = self.cstatus in self.CSET_SYNC
    self.is_in_use = self.cstatus != self.CS_UNCONFIGURED

    sync_match = self.SYNC_RE.match(procline)
    if sync_match:
      self.sync_percent = float(sync_match.group(1))
      hours, minutes, seconds = [int(sync_match.group(i)) for i in (2, 3, 4)]
      self.est_time = (hours * 60 + minutes) * 60 + seconds
    else:
      # we have (in this if branch) no percent information, but if
      # we're resyncing we need to 'fake' a sync percent information,
      # as this is how cmdlib determines if it makes sense to wait for
      # resyncing or not
      self.sync_percent = 0 if self.is_in_resync else None
      self.est_time = None
1147

    
1148

    
1149
class BaseDRBD(BlockDev): # pylint: disable=W0223
1150
  """Base DRBD class.
1151

1152
  This class contains a few bits of common functionality between the
1153
  0.7 and 8.x versions of DRBD.
1154

1155
  """
1156
  _VERSION_RE = re.compile(r"^version: (\d+)\.(\d+)\.(\d+)(?:\.\d+)?"
1157
                           r" \(api:(\d+)/proto:(\d+)(?:-(\d+))?\)")
1158
  _VALID_LINE_RE = re.compile("^ *([0-9]+): cs:([^ ]+).*$")
1159
  _UNUSED_LINE_RE = re.compile("^ *([0-9]+): cs:Unconfigured$")
1160

    
1161
  _DRBD_MAJOR = 147
1162
  _ST_UNCONFIGURED = "Unconfigured"
1163
  _ST_WFCONNECTION = "WFConnection"
1164
  _ST_CONNECTED = "Connected"
1165

    
1166
  _STATUS_FILE = constants.DRBD_STATUS_FILE
1167
  _USERMODE_HELPER_FILE = "/sys/module/drbd/parameters/usermode_helper"
1168

    
1169
  @staticmethod
1170
  def _GetProcData(filename=_STATUS_FILE):
1171
    """Return data from /proc/drbd.
1172

1173
    """
1174
    try:
1175
      data = utils.ReadFile(filename).splitlines()
1176
    except EnvironmentError, err:
1177
      if err.errno == errno.ENOENT:
1178
        _ThrowError("The file %s cannot be opened, check if the module"
1179
                    " is loaded (%s)", filename, str(err))
1180
      else:
1181
        _ThrowError("Can't read the DRBD proc file %s: %s", filename, str(err))
1182
    if not data:
1183
      _ThrowError("Can't read any data from %s", filename)
1184
    return data
1185

    
1186
  @classmethod
1187
  def _MassageProcData(cls, data):
1188
    """Transform the output of _GetProdData into a nicer form.
1189

1190
    @return: a dictionary of minor: joined lines from /proc/drbd
1191
        for that minor
1192

1193
    """
1194
    results = {}
1195
    old_minor = old_line = None
1196
    for line in data:
1197
      if not line: # completely empty lines, as can be returned by drbd8.0+
1198
        continue
1199
      lresult = cls._VALID_LINE_RE.match(line)
1200
      if lresult is not None:
1201
        if old_minor is not None:
1202
          results[old_minor] = old_line
1203
        old_minor = int(lresult.group(1))
1204
        old_line = line
1205
      else:
1206
        if old_minor is not None:
1207
          old_line += " " + line.strip()
1208
    # add last line
1209
    if old_minor is not None:
1210
      results[old_minor] = old_line
1211
    return results
1212

    
1213
  @classmethod
1214
  def _GetVersion(cls, proc_data):
1215
    """Return the DRBD version.
1216

1217
    This will return a dict with keys:
1218
      - k_major
1219
      - k_minor
1220
      - k_point
1221
      - api
1222
      - proto
1223
      - proto2 (only on drbd > 8.2.X)
1224

1225
    """
1226
    first_line = proc_data[0].strip()
1227
    version = cls._VERSION_RE.match(first_line)
1228
    if not version:
1229
      raise errors.BlockDeviceError("Can't parse DRBD version from '%s'" %
1230
                                    first_line)
1231

    
1232
    values = version.groups()
1233
    retval = {
1234
      "k_major": int(values[0]),
1235
      "k_minor": int(values[1]),
1236
      "k_point": int(values[2]),
1237
      "api": int(values[3]),
1238
      "proto": int(values[4]),
1239
      }
1240
    if values[5] is not None:
1241
      retval["proto2"] = values[5]
1242

    
1243
    return retval
1244

    
1245
  @staticmethod
1246
  def GetUsermodeHelper(filename=_USERMODE_HELPER_FILE):
1247
    """Returns DRBD usermode_helper currently set.
1248

1249
    """
1250
    try:
1251
      helper = utils.ReadFile(filename).splitlines()[0]
1252
    except EnvironmentError, err:
1253
      if err.errno == errno.ENOENT:
1254
        _ThrowError("The file %s cannot be opened, check if the module"
1255
                    " is loaded (%s)", filename, str(err))
1256
      else:
1257
        _ThrowError("Can't read DRBD helper file %s: %s", filename, str(err))
1258
    if not helper:
1259
      _ThrowError("Can't read any data from %s", filename)
1260
    return helper
1261

    
1262
  @staticmethod
1263
  def _DevPath(minor):
1264
    """Return the path to a drbd device for a given minor.
1265

1266
    """
1267
    return "/dev/drbd%d" % minor
1268

    
1269
  @classmethod
1270
  def GetUsedDevs(cls):
1271
    """Compute the list of used DRBD devices.
1272

1273
    """
1274
    data = cls._GetProcData()
1275

    
1276
    used_devs = {}
1277
    for line in data:
1278
      match = cls._VALID_LINE_RE.match(line)
1279
      if not match:
1280
        continue
1281
      minor = int(match.group(1))
1282
      state = match.group(2)
1283
      if state == cls._ST_UNCONFIGURED:
1284
        continue
1285
      used_devs[minor] = state, line
1286

    
1287
    return used_devs
1288

    
1289
  def _SetFromMinor(self, minor):
1290
    """Set our parameters based on the given minor.
1291

1292
    This sets our minor variable and our dev_path.
1293

1294
    """
1295
    if minor is None:
1296
      self.minor = self.dev_path = None
1297
      self.attached = False
1298
    else:
1299
      self.minor = minor
1300
      self.dev_path = self._DevPath(minor)
1301
      self.attached = True
1302

    
1303
  @staticmethod
1304
  def _CheckMetaSize(meta_device):
1305
    """Check if the given meta device looks like a valid one.
1306

1307
    This currently only checks the size, which must be around
1308
    128MiB.
1309

1310
    """
1311
    result = utils.RunCmd(["blockdev", "--getsize", meta_device])
1312
    if result.failed:
1313
      _ThrowError("Failed to get device size: %s - %s",
1314
                  result.fail_reason, result.output)
1315
    try:
1316
      sectors = int(result.stdout)
1317
    except (TypeError, ValueError):
1318
      _ThrowError("Invalid output from blockdev: '%s'", result.stdout)
1319
    num_bytes = sectors * 512
1320
    if num_bytes < 128 * 1024 * 1024: # less than 128MiB
1321
      _ThrowError("Meta device too small (%.2fMib)", (num_bytes / 1024 / 1024))
1322
    # the maximum *valid* size of the meta device when living on top
1323
    # of LVM is hard to compute: it depends on the number of stripes
1324
    # and the PE size; e.g. a 2-stripe, 64MB PE will result in a 128MB
1325
    # (normal size), but an eight-stripe 128MB PE will result in a 1GB
1326
    # size meta device; as such, we restrict it to 1GB (a little bit
1327
    # too generous, but making assumptions about PE size is hard)
1328
    if num_bytes > 1024 * 1024 * 1024:
1329
      _ThrowError("Meta device too big (%.2fMiB)", (num_bytes / 1024 / 1024))
1330

    
1331
  def Rename(self, new_id):
1332
    """Rename a device.
1333

1334
    This is not supported for drbd devices.
1335

1336
    """
1337
    raise errors.ProgrammerError("Can't rename a drbd device")
1338

    
1339

    
1340
class DRBD8(BaseDRBD):
1341
  """DRBD v8.x block device.
1342

1343
  This implements the local host part of the DRBD device, i.e. it
1344
  doesn't do anything to the supposed peer. If you need a fully
1345
  connected DRBD pair, you need to use this class on both hosts.
1346

1347
  The unique_id for the drbd device is a (local_ip, local_port,
1348
  remote_ip, remote_port, local_minor, secret) tuple, and it must have
1349
  two children: the data device and the meta_device. The meta device
1350
  is checked for valid size and is zeroed on create.
1351

1352
  """
1353
  _MAX_MINORS = 255
1354
  _PARSE_SHOW = None
1355

    
1356
  # timeout constants
1357
  _NET_RECONFIG_TIMEOUT = 60
1358

    
1359
  # command line options for barriers
1360
  _DISABLE_DISK_OPTION = "--no-disk-barrier"  # -a
1361
  _DISABLE_DRAIN_OPTION = "--no-disk-drain"   # -D
1362
  _DISABLE_FLUSH_OPTION = "--no-disk-flushes" # -i
1363
  _DISABLE_META_FLUSH_OPTION = "--no-md-flushes"  # -m
1364

    
1365
  def __init__(self, unique_id, children, size, params):
    # a child list containing any None entry is treated as no children
    if children and None in children:
      children = []
    if len(children) not in (0, 2):
      raise ValueError("Invalid configuration data %s" % str(children))
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 6:
      raise ValueError("Invalid configuration data %s" % str(unique_id))
    (self._lhost, self._lport,
     self._rhost, self._rport,
     self._aminor, self._secret) = unique_id
    # drop the children if the meta device cannot be read
    if children and not _CanReadDevice(children[1].dev_path):
      logging.info("drbd%s: Ignoring unreadable meta device", self._aminor)
      children = []
    super(DRBD8, self).__init__(unique_id, children, size, params)
    self.major = self._DRBD_MAJOR
    drbd_version = self._GetVersion(self._GetProcData())
    if drbd_version["k_major"] != 8:
      _ThrowError("Mismatch in DRBD kernel version and requested ganeti"
                  " usage: kernel is %s.%s, ganeti wants 8.x",
                  drbd_version["k_major"], drbd_version["k_minor"])

    # a device cannot be connected to itself
    if (self._lhost is not None and self._lhost == self._rhost and
        self._lport == self._rport):
      raise ValueError("Invalid configuration data, same local/remote %s" %
                       (unique_id,))
    self.Attach()
1392

    
1393
  @classmethod
  def _InitMeta(cls, minor, dev_path):
    """Initialize a meta device.

    This will not work if the given minor is in use.

    """
    # Zero the metadata first, in order to make sure drbdmeta doesn't
    # try to auto-detect existing filesystems or similar (see
    # http://code.google.com/p/ganeti/issues/detail?id=182); we only
    # care about the first 128MB of data in the device, even though it
    # can be bigger
    wipe_result = utils.RunCmd([constants.DD_CMD,
                                "if=/dev/zero", "of=%s" % dev_path,
                                "bs=1048576", "count=128", "oflag=direct"])
    if wipe_result.failed:
      _ThrowError("Can't wipe the meta device: %s", wipe_result.output)

    md_result = utils.RunCmd(["drbdmeta", "--force", cls._DevPath(minor),
                              "v08", dev_path, "0", "create-md"])
    if md_result.failed:
      _ThrowError("Can't initialize meta device: %s", md_result.output)
1415

    
1416
  @classmethod
  def _FindUnusedMinor(cls):
    """Find an unused DRBD device.

    This is specific to 8.x as the minors are allocated dynamically,
    so non-existing numbers up to a max minor count are actually free.

    """
    highest = None
    for line in cls._GetProcData():
      unused = cls._UNUSED_LINE_RE.match(line)
      if unused:
        # an explicitly unconfigured minor can be reused directly
        return int(unused.group(1))
      configured = cls._VALID_LINE_RE.match(line)
      if configured:
        minor = int(configured.group(1))
        if highest is None or minor > highest:
          highest = minor
    if highest is None: # there are no minors in use at all
      return 0
    if highest >= cls._MAX_MINORS:
      logging.error("Error: no free drbd minors!")
      raise errors.BlockDeviceError("Can't find a free DRBD minor")
    return highest + 1
1441

    
1442
  @classmethod
  def _GetShowParser(cls):
    """Return a parser for `drbd show` output.

    This will either create or return an already-created parser for the
    output of the command `drbd show`.

    """
    # the grammar is built only once and cached on the class
    if cls._PARSE_SHOW is not None:
      return cls._PARSE_SHOW

    # pyparsing setup
    # punctuation tokens; suppress() drops them from the parse results
    lbrace = pyp.Literal("{").suppress()
    rbrace = pyp.Literal("}").suppress()
    lbracket = pyp.Literal("[").suppress()
    rbracket = pyp.Literal("]").suppress()
    semi = pyp.Literal(";").suppress()
    colon = pyp.Literal(":").suppress()
    # this also converts the value to an int
    number = pyp.Word(pyp.nums).setParseAction(lambda s, l, t: int(t[0]))

    comment = pyp.Literal("#") + pyp.Optional(pyp.restOfLine)
    # the "_is_default" marker emitted by drbdsetup is parsed but discarded
    defa = pyp.Literal("_is_default").suppress()
    dbl_quote = pyp.Literal('"').suppress()

    keyword = pyp.Word(pyp.alphanums + "-")

    # value types
    value = pyp.Word(pyp.alphanums + "_-/.:")
    quoted = dbl_quote + pyp.CharsNotIn('"') + dbl_quote
    # addresses parse to (ip, port); the family prefix is discarded
    ipv4_addr = (pyp.Optional(pyp.Literal("ipv4")).suppress() +
                 pyp.Word(pyp.nums + ".") + colon + number)
    ipv6_addr = (pyp.Optional(pyp.Literal("ipv6")).suppress() +
                 pyp.Optional(lbracket) + pyp.Word(pyp.hexnums + ":") +
                 pyp.Optional(rbracket) + colon + number)
    # meta device, extended syntax
    meta_value = ((value ^ quoted) + lbracket + number + rbracket)
    # device name, extended syntax
    device_value = pyp.Literal("minor").suppress() + number

    # a statement: "keyword [value] [_is_default];" followed by the
    # rest of the line, which is ignored
    stmt = (~rbrace + keyword + ~lbrace +
            pyp.Optional(ipv4_addr ^ ipv6_addr ^ value ^ quoted ^ meta_value ^
                         device_value) +
            pyp.Optional(defa) + semi +
            pyp.Optional(pyp.restOfLine).suppress())

    # an entire section: "name { stmt* }"; each statement becomes a Group
    section_name = pyp.Word(pyp.alphas + "_")
    section = section_name + lbrace + pyp.ZeroOrMore(pyp.Group(stmt)) + rbrace

    # the whole output is a mix of sections and top-level statements
    bnf = pyp.ZeroOrMore(pyp.Group(section ^ stmt))
    bnf.ignore(comment)

    # cache for subsequent calls
    cls._PARSE_SHOW = bnf

    return bnf
1499

    
1500
  @classmethod
  def _GetShowData(cls, minor):
    """Return the `drbdsetup show` data for a minor.

    """
    show = utils.RunCmd(["drbdsetup", cls._DevPath(minor), "show"])
    if not show.failed:
      return show.stdout
    logging.error("Can't display the drbd config: %s - %s",
                  show.fail_reason, show.output)
    return None
1511

    
1512
  @classmethod
1513
  def _GetDevInfo(cls, out):
1514
    """Parse details about a given DRBD minor.
1515

1516
    This return, if available, the local backing device (as a path)
1517
    and the local and remote (ip, port) information from a string
1518
    containing the output of the `drbdsetup show` command as returned
1519
    by _GetShowData.
1520

1521
    """
1522
    data = {}
1523
    if not out:
1524
      return data
1525

    
1526
    bnf = cls._GetShowParser()
1527
    # run pyparse
1528

    
1529
    try:
1530
      results = bnf.parseString(out)
1531
    except pyp.ParseException, err:
1532
      _ThrowError("Can't parse drbdsetup show output: %s", str(err))
1533

    
1534
    # and massage the results into our desired format
1535
    for section in results:
1536
      sname = section[0]
1537
      if sname == "_this_host":
1538
        for lst in section[1:]:
1539
          if lst[0] == "disk":
1540
            data["local_dev"] = lst[1]
1541
          elif lst[0] == "meta-disk":
1542
            data["meta_dev"] = lst[1]
1543
            data["meta_index"] = lst[2]
1544
          elif lst[0] == "address":
1545
            data["local_addr"] = tuple(lst[1:])
1546
      elif sname == "_remote_host":
1547
        for lst in section[1:]:
1548
          if lst[0] == "address":
1549
            data["remote_addr"] = tuple(lst[1:])
1550
    return data
1551

    
1552
  def _MatchesLocal(self, info):
    """Test if our local config matches with an existing device.

    The parameter should be as returned from `_GetDevInfo()`. This
    method tests if our local backing device is the same as the one in
    the info parameter, in effect testing if we look like the given
    device.

    """
    if self._children:
      backend, meta = self._children
    else:
      backend = meta = None

    # the backing disk must either match ours or be absent on both sides
    if backend is not None:
      matches = ("local_dev" in info and
                 info["local_dev"] == backend.dev_path)
    else:
      matches = "local_dev" not in info

    # likewise for the meta device (always at index 0 when present)
    if meta is not None:
      matches = (matches and
                 "meta_dev" in info and
                 info["meta_dev"] == meta.dev_path and
                 "meta_index" in info and
                 info["meta_index"] == 0)
    else:
      matches = (matches and
                 "meta_dev" not in info and
                 "meta_index" not in info)
    return matches
1580

    
1581
  def _MatchesNet(self, info):
    """Test if our network config matches with an existing device.

    The parameter should be as returned from `_GetDevInfo()`. This
    method tests if our network configuration is the same as the one
    in the info parameter, in effect testing if we look like the given
    device.

    """
    # fully unconfigured on both sides counts as a match
    if (self._lhost is None and "local_addr" not in info and
        self._rhost is None and "remote_addr" not in info):
      return True

    if self._lhost is None:
      return False

    if "local_addr" not in info or "remote_addr" not in info:
      return False

    return (info["local_addr"] == (self._lhost, self._lport) and
            info["remote_addr"] == (self._rhost, self._rport))
1605

    
1606
  def _AssembleLocal(self, minor, backend, meta, size):
    """Configure the local part of a DRBD device.

    """
    args = ["drbdsetup", self._DevPath(minor), "disk",
            backend, meta, "0",
            "-e", "detach",
            "--create-device"]
    if size:
      args.extend(["-d", "%sm" % size])

    # the supported barrier options depend on the running DRBD version
    version = self._GetVersion(self._GetProcData())
    args.extend(self._ComputeDiskBarrierArgs(
      version["k_major"], version["k_minor"], version["k_point"],
      self.params[constants.LDP_BARRIERS],
      self.params[constants.LDP_NO_META_FLUSH]))

    # user-supplied extra disk options, if any
    custom_opts = self.params[constants.LDP_DISK_CUSTOM]
    if custom_opts:
      args.extend(shlex.split(custom_opts))

    result = utils.RunCmd(args)
    if result.failed:
      _ThrowError("drbd%d: can't attach local disk: %s", minor, result.output)
1634

    
1635
  @classmethod
  def _ComputeDiskBarrierArgs(cls, vmaj, vmin, vrel, disabled_barriers,
                              disable_meta_flush):
    """Compute the DRBD command line parameters for disk barriers

    Returns a list of the disk barrier parameters as requested via the
    disabled_barriers and disable_meta_flush arguments, and according to the
    supported ones in the DRBD version vmaj.vmin.vrel

    If the desired option is unsupported, raises errors.BlockDeviceError.

    """
    disabled_barriers_set = frozenset(disabled_barriers)
    if not disabled_barriers_set in constants.DRBD_VALID_BARRIER_OPT:
      raise errors.BlockDeviceError("%s is not a valid option set for DRBD"
                                    " barriers" % disabled_barriers)

    args = []

    # The following code assumes DRBD 8.x, with x < 4 and x != 1 (DRBD 8.1.x
    # does not exist).
    # FIX: the original condition was "not vmaj == 8 and vmin in (0, 2, 3)",
    # which parses as "(not vmaj == 8) and (vmin in (0, 2, 3))" and therefore
    # never rejected any 8.x version (including the nonexistent 8.1) while
    # only rejecting non-8.x versions whose minor happened to be 0, 2 or 3;
    # reject everything that is not 8.0/8.2/8.3 as intended.
    if vmaj != 8 or vmin not in (0, 2, 3):
      raise errors.BlockDeviceError("Unsupported DRBD version: %d.%d.%d" %
                                    (vmaj, vmin, vrel))

    def _AppendOrRaise(option, min_version):
      """Helper for DRBD options"""
      if min_version is not None and vrel >= min_version:
        args.append(option)
      else:
        raise errors.BlockDeviceError("Could not use the option %s as the"
                                      " DRBD version %d.%d.%d does not support"
                                      " it." % (option, vmaj, vmin, vrel))

    # the minimum version for each feature is encoded via pairs of (minor
    # version -> x) where x is version in which support for the option was
    # introduced.
    meta_flush_supported = disk_flush_supported = {
      0: 12,
      2: 7,
      3: 0,
      }

    disk_drain_supported = {
      2: 7,
      3: 0,
      }

    disk_barriers_supported = {
      3: 0,
      }

    # meta flushes
    if disable_meta_flush:
      _AppendOrRaise(cls._DISABLE_META_FLUSH_OPTION,
                     meta_flush_supported.get(vmin, None))

    # disk flushes
    if constants.DRBD_B_DISK_FLUSH in disabled_barriers_set:
      _AppendOrRaise(cls._DISABLE_FLUSH_OPTION,
                     disk_flush_supported.get(vmin, None))

    # disk drain
    if constants.DRBD_B_DISK_DRAIN in disabled_barriers_set:
      _AppendOrRaise(cls._DISABLE_DRAIN_OPTION,
                     disk_drain_supported.get(vmin, None))

    # disk barriers
    if constants.DRBD_B_DISK_BARRIERS in disabled_barriers_set:
      _AppendOrRaise(cls._DISABLE_DISK_OPTION,
                     disk_barriers_supported.get(vmin, None))

    return args
1708

    
1709
  def _AssembleNet(self, minor, net_info, protocol,
                   dual_pri=False, hmac=None, secret=None):
    """Configure the network part of the device.

    @type minor: int
    @param minor: the drbd minor to configure
    @type net_info: tuple
    @param net_info: (local_host, local_port, remote_host, remote_port);
        if any element is None, the network side is shut down instead
    @type protocol: string
    @param protocol: the DRBD wire protocol to use
    @type dual_pri: boolean
    @param dual_pri: whether to allow dual-primary mode ("-m")
    @type hmac: string
    @param hmac: HMAC algorithm for peer authentication (used with secret)
    @type secret: string
    @param secret: shared secret for peer authentication

    """
    lhost, lport, rhost, rport = net_info
    if None in net_info:
      # we don't want network connection and actually want to make
      # sure its shutdown
      self._ShutdownNet(minor)
      return

    # Workaround for a race condition. When DRBD is doing its dance to
    # establish a connection with its peer, it also sends the
    # synchronization speed over the wire. In some cases setting the
    # sync speed only after setting up both sides can race with DRBD
    # connecting, hence we set it here before telling DRBD anything
    # about its peer.
    sync_errors = self._SetMinorSyncParams(minor, self.params)
    if sync_errors:
      _ThrowError("drbd%d: can't set the synchronization parameters: %s" %
                  (minor, utils.CommaJoin(sync_errors)))

    # both endpoints must belong to the same address family
    if netutils.IP6Address.IsValid(lhost):
      if not netutils.IP6Address.IsValid(rhost):
        _ThrowError("drbd%d: can't connect ip %s to ip %s" %
                    (minor, lhost, rhost))
      family = "ipv6"
    elif netutils.IP4Address.IsValid(lhost):
      if not netutils.IP4Address.IsValid(rhost):
        _ThrowError("drbd%d: can't connect ip %s to ip %s" %
                    (minor, lhost, rhost))
      family = "ipv4"
    else:
      _ThrowError("drbd%d: Invalid ip %s" % (minor, lhost))

    args = ["drbdsetup", self._DevPath(minor), "net",
            "%s:%s:%s" % (family, lhost, lport),
            "%s:%s:%s" % (family, rhost, rport), protocol,
            "-A", "discard-zero-changes",
            "-B", "consensus",
            "--create-device",
            ]
    if dual_pri:
      args.append("-m")
    if hmac and secret:
      args.extend(["-a", hmac, "-x", secret])

    # append any user-supplied extra "drbdsetup net" options
    if self.params[constants.LDP_NET_CUSTOM]:
      args.extend(shlex.split(self.params[constants.LDP_NET_CUSTOM]))

    result = utils.RunCmd(args)
    if result.failed:
      _ThrowError("drbd%d: can't setup network: %s - %s",
                  minor, result.fail_reason, result.output)

    def _CheckNetworkConfig():
      # poll 'drbdsetup show' until both endpoints are visible and match
      # what we just configured
      info = self._GetDevInfo(self._GetShowData(minor))
      if not "local_addr" in info or not "remote_addr" in info:
        raise utils.RetryAgain()

      if (info["local_addr"] != (lhost, lport) or
          info["remote_addr"] != (rhost, rport)):
        raise utils.RetryAgain()

    try:
      utils.Retry(_CheckNetworkConfig, 1.0, 10.0)
    except utils.RetryTimeout:
      _ThrowError("drbd%d: timeout while configuring network", minor)
  def AddChildren(self, devices):
    """Attach local storage (data and metadata devices) to the DRBD device.

    @type devices: list
    @param devices: a pair of block devices: [data_device, meta_device]

    """
    if self.minor is None:
      _ThrowError("drbd%d: can't attach to dbrd8 during AddChildren",
                  self._aminor)
    if len(devices) != 2:
      _ThrowError("drbd%d: need two devices for AddChildren", self.minor)
    show_info = self._GetDevInfo(self._GetShowData(self.minor))
    if "local_dev" in show_info:
      _ThrowError("drbd%d: already attached to a local disk", self.minor)
    data_dev, meta_dev = devices
    if data_dev.dev_path is None or meta_dev.dev_path is None:
      _ThrowError("drbd%d: children not ready during AddChildren", self.minor)
    data_dev.Open()
    meta_dev.Open()
    # validate and (re-)initialize the metadata before attaching
    self._CheckMetaSize(meta_dev.dev_path)
    self._InitMeta(self._FindUnusedMinor(), meta_dev.dev_path)

    self._AssembleLocal(self.minor, data_dev.dev_path, meta_dev.dev_path,
                        self.size)
    self._children = devices
  def RemoveChildren(self, devices):
    """Detach the drbd device from its local storage.

    @type devices: list
    @param devices: the two children we expect to be detaching; they must
        match our currently recorded children

    """
    if self.minor is None:
      _ThrowError("drbd%d: can't attach to drbd8 during RemoveChildren",
                  self._aminor)
    # early return if we don't actually have backing storage
    show_info = self._GetDevInfo(self._GetShowData(self.minor))
    if "local_dev" not in show_info:
      return
    if len(self._children) != 2:
      _ThrowError("drbd%d: we don't have two children: %s", self.minor,
                  self._children)
    if self._children.count(None) == 2: # we don't actually have children :)
      logging.warning("drbd%d: requested detach while detached", self.minor)
      return
    if len(devices) != 2:
      _ThrowError("drbd%d: we need two children in RemoveChildren", self.minor)
    # the paths passed in must match the paths of our recorded children
    for own_child, given_path in zip(self._children, devices):
      if given_path != own_child.dev_path:
        _ThrowError("drbd%d: mismatch in local storage (%s != %s) in"
                    " RemoveChildren", self.minor, given_path,
                    own_child.dev_path)

    self._ShutdownLocal(self.minor)
    self._children = []
  @classmethod
  def _SetMinorSyncParams(cls, minor, params):
    """Set the parameters of the DRBD syncer.

    This is the low-level implementation.

    @type minor: int
    @param minor: the drbd minor whose settings we change
    @type params: dict
    @param params: LD level disk parameters related to the synchronization
    @rtype: list
    @return: a list of error messages (empty on success)

    """

    args = ["drbdsetup", cls._DevPath(minor), "syncer"]
    if params[constants.LDP_DYNAMIC_RESYNC]:
      # the dynamic resync speed controller needs DRBD >= 8.3.9
      version = cls._GetVersion(cls._GetProcData())
      vmin = version["k_minor"]
      vrel = version["k_point"]

      # By definition we are using 8.x, so just check the rest of the version
      # number
      if vmin != 3 or vrel < 9:
        msg = ("The current DRBD version (8.%d.%d) does not support the "
               "dynamic resync speed controller" % (vmin, vrel))
        logging.error(msg)
        return [msg]

      # c-plan-ahead == 0 would silently disable the controller at DRBD
      # level, so treat it as a configuration error
      if params[constants.LDP_PLAN_AHEAD] == 0:
        msg = ("A value of 0 for c-plan-ahead disables the dynamic sync speed"
               " controller at DRBD level. If you want to disable it, please"
               " set the dynamic-resync disk parameter to False.")
        logging.error(msg)
        return [msg]

      # add the c-* parameters to args
      args.extend(["--c-plan-ahead", params[constants.LDP_PLAN_AHEAD],
                   "--c-fill-target", params[constants.LDP_FILL_TARGET],
                   "--c-delay-target", params[constants.LDP_DELAY_TARGET],
                   "--c-max-rate", params[constants.LDP_MAX_RATE],
                   "--c-min-rate", params[constants.LDP_MIN_RATE],
                   ])

    else:
      # static resync speed
      args.extend(["-r", "%d" % params[constants.LDP_RESYNC_RATE]])

    args.append("--create-device")
    result = utils.RunCmd(args)
    if result.failed:
      msg = ("Can't change syncer rate: %s - %s" %
             (result.fail_reason, result.output))
      logging.error(msg)
      return [msg]

    return []
  def SetSyncParams(self, params):
    """Set the synchronization parameters of the DRBD syncer.

    @type params: dict
    @param params: LD level disk parameters related to the synchronization
    @rtype: list
    @return: error messages emitted by this node and by the children; an
        empty list means no errors

    """
    if self.minor is None:
      msg = "Not attached during SetSyncParams"
      logging.info(msg)
      return [msg]

    # collect errors from the children first, then from our own minor
    errs = super(DRBD8, self).SetSyncParams(params)
    errs.extend(self._SetMinorSyncParams(self.minor, params))
    return errs
  def PauseResumeSync(self, pause):
    """Pauses or resumes the sync of a DRBD device.

    @type pause: boolean
    @param pause: whether to pause (True) or resume (False) the sync
    @rtype: boolean
    @return: True iff the operation succeeded here and on all children

    """
    if self.minor is None:
      logging.info("Not attached during PauseSync")
      return False

    children_ok = super(DRBD8, self).PauseResumeSync(pause)

    action = "pause-sync" if pause else "resume-sync"

    result = utils.RunCmd(["drbdsetup", self.dev_path, action])
    if result.failed:
      logging.error("Can't %s: %s - %s", action,
                    result.fail_reason, result.output)
    return not result.failed and children_ok
  def GetProcStatus(self):
    """Return this device's status as parsed from /proc/drbd.

    @rtype: DRBD8Status

    """
    if self.minor is None:
      _ThrowError("drbd%d: GetStats() called while not attached", self._aminor)
    proc_data = self._MassageProcData(self._GetProcData())
    if self.minor not in proc_data:
      _ThrowError("drbd%d: can't find myself in /proc", self.minor)
    return DRBD8Status(proc_data[self.minor])
  def GetSyncStatus(self):
    """Returns the sync status of the device.

    If sync_percent is None, it means all is ok. If estimated_time is
    None, we cannot estimate the time needed, otherwise it's the time
    left in seconds.

    The device counts as degraded when either the network is not
    connected or the local disk is not up to date; the ldisk status
    reflects whether we have usable local storage.

    @rtype: objects.BlockDevStatus

    """
    if self.minor is None and not self.Attach():
      _ThrowError("drbd%d: can't Attach() in GetSyncStatus", self._aminor)

    stats = self.GetProcStatus()
    # degraded == not (connected and disk up-to-date)
    degraded = not (stats.is_connected and stats.is_disk_uptodate)

    if stats.is_disk_uptodate:
      local_status = constants.LDS_OKAY
    elif stats.is_diskless:
      local_status = constants.LDS_FAULTY
    else:
      local_status = constants.LDS_UNKNOWN

    return objects.BlockDevStatus(dev_path=self.dev_path,
                                  major=self.major,
                                  minor=self.minor,
                                  sync_percent=stats.sync_percent,
                                  estimated_time=stats.est_time,
                                  is_degraded=degraded,
                                  ldisk_status=local_status)
  def Open(self, force=False):
    """Switch the device to the primary role.

    If the 'force' parameter is given, the '-o' option is passed to
    drbdsetup. Since this is a potentially dangerous operation, the
    force flag should be only given after creation, when it actually
    is mandatory.

    """
    if self.minor is None and not self.Attach():
      logging.error("DRBD cannot attach to a device during open")
      return False
    extra_opts = ["-o"] if force else []
    result = utils.RunCmd(["drbdsetup", self.dev_path, "primary"] + extra_opts)
    if result.failed:
      _ThrowError("drbd%d: can't make drbd device primary: %s", self.minor,
                  result.output)
  def Close(self):
    """Switch the device to the secondary role.

    This will, of course, fail if the device is in use.

    """
    if self.minor is None and not self.Attach():
      _ThrowError("drbd%d: can't Attach() in Close()", self._aminor)
    demote_cmd = ["drbdsetup", self.dev_path, "secondary"]
    result = utils.RunCmd(demote_cmd)
    if result.failed:
      _ThrowError("drbd%d: can't switch drbd device to secondary: %s",
                  self.minor, result.output)
  def DisconnectNet(self):
    """Removes network configuration.

    This method shutdowns the network side of the device.

    The method will wait up to a hardcoded timeout for the device to
    go into standalone after the 'disconnect' command before
    re-configuring it, as sometimes it takes a while for the
    disconnect to actually propagate and thus we might issue a 'net'
    command while the device is still connected. If the device will
    still be attached to the network and we time out, we raise an
    exception.

    """
    if self.minor is None:
      _ThrowError("drbd%d: disk not attached in re-attach net", self._aminor)

    if None in (self._lhost, self._lport, self._rhost, self._rport):
      _ThrowError("drbd%d: DRBD disk missing network info in"
                  " DisconnectNet()", self.minor)

    # mutable holder so the nested retry function can update the flag
    # (closures cannot rebind outer locals in Python 2)
    class _DisconnectStatus:
      def __init__(self, ever_disconnected):
        self.ever_disconnected = ever_disconnected

    dstatus = _DisconnectStatus(_IgnoreError(self._ShutdownNet, self.minor))

    def _WaitForDisconnect():
      if self.GetProcStatus().is_standalone:
        return

      # retry the disconnect, it seems possible that due to a well-time
      # disconnect on the peer, my disconnect command might be ignored and
      # forgotten
      dstatus.ever_disconnected = \
        _IgnoreError(self._ShutdownNet, self.minor) or dstatus.ever_disconnected

      raise utils.RetryAgain()

    # Keep start time
    start_time = time.time()

    try:
      # Start delay at 100 milliseconds and grow up to 2 seconds
      utils.Retry(_WaitForDisconnect, (0.1, 1.5, 2.0),
                  self._NET_RECONFIG_TIMEOUT)
    except utils.RetryTimeout:
      if dstatus.ever_disconnected:
        msg = ("drbd%d: device did not react to the"
               " 'disconnect' command in a timely manner")
      else:
        msg = "drbd%d: can't shutdown network, even after multiple retries"

      _ThrowError(msg, self.minor)

    # log only when the detach took a significant fraction of the timeout
    reconfig_time = time.time() - start_time
    if reconfig_time > (self._NET_RECONFIG_TIMEOUT * 0.25):
      logging.info("drbd%d: DisconnectNet: detach took %.3f seconds",
                   self.minor, reconfig_time)
  def AttachNet(self, multimaster):
    """Reconnects the network.

    This method connects the network side of the device with a
    specified multi-master flag. The device needs to be 'Standalone'
    but have valid network configuration data.

    @type multimaster: boolean
    @param multimaster: init the network in dual-primary mode

    """
    if self.minor is None:
      _ThrowError("drbd%d: device not attached in AttachNet", self._aminor)

    net_data = (self._lhost, self._lport, self._rhost, self._rport)
    if None in net_data:
      _ThrowError("drbd%d: missing network info in AttachNet()", self.minor)

    if not self.GetProcStatus().is_standalone:
      _ThrowError("drbd%d: device is not standalone in AttachNet", self.minor)

    self._AssembleNet(self.minor, net_data,
                      constants.DRBD_NET_PROTOCOL, dual_pri=multimaster,
                      hmac=constants.DRBD_HMAC_ALG, secret=self._secret)
  def Attach(self):
    """Check if our minor is configured.

    This doesn't do any device configurations - it only checks if the
    minor is in a state different from Unconfigured.

    Note that this function will not change the state of the system in
    any way (except in case of side-effects caused by reading from
    /proc).

    @rtype: boolean
    @return: True when the minor is in use

    """
    minor = self._aminor if self._aminor in self.GetUsedDevs() else None

    self._SetFromMinor(minor)
    return minor is not None
  def Assemble(self):
    """Assemble the drbd.

    Method:
      - if we have a configured device, we try to ensure that it matches
        our config
      - if not, we create it from zero
      - anyway, set the device parameters

    """
    super(DRBD8, self).Assemble()

    self.Attach()
    if self.minor is None:
      # local device completely unconfigured: build from scratch
      self._FastAssemble()
    else:
      # recheck the local and network status and try to fix the device
      self._SlowAssemble()

    setup_errors = self.SetSyncParams(self.params)
    if setup_errors:
      _ThrowError("drbd%d: can't set the synchronization parameters: %s" %
                  (self.minor, utils.CommaJoin(setup_errors)))
  def _SlowAssemble(self):
    """Assembles the DRBD device from a (partially) configured device.

    In case of partially attached (local device matches but no network
    setup), we perform the network attach. If successful, we re-test
    the attach if can return success.

    """
    # TODO: Rewrite to not use a for loop just because there is 'break'
    # pylint: disable=W0631
    net_data = (self._lhost, self._lport, self._rhost, self._rport)
    # single-iteration loop: 'break' means success, falling through to
    # the 'else' clause means no case matched and assembly failed
    for minor in (self._aminor,):
      info = self._GetDevInfo(self._GetShowData(minor))
      match_l = self._MatchesLocal(info)
      match_r = self._MatchesNet(info)

      if match_l and match_r:
        # everything matches
        break

      if match_l and not match_r and "local_addr" not in info:
        # disk matches, but not attached to network, attach and recheck
        self._AssembleNet(minor, net_data, constants.DRBD_NET_PROTOCOL,
                          hmac=constants.DRBD_HMAC_ALG, secret=self._secret)
        if self._MatchesNet(self._GetDevInfo(self._GetShowData(minor))):
          break
        else:
          _ThrowError("drbd%d: network attach successful, but 'drbdsetup"
                      " show' disagrees", minor)

      if match_r and "local_dev" not in info:
        # no local disk, but network attached and it matches
        self._AssembleLocal(minor, self._children[0].dev_path,
                            self._children[1].dev_path, self.size)
        if self._MatchesNet(self._GetDevInfo(self._GetShowData(minor))):
          break
        else:
          _ThrowError("drbd%d: disk attach successful, but 'drbdsetup"
                      " show' disagrees", minor)

      # this case must be considered only if we actually have local
      # storage, i.e. not in diskless mode, because all diskless
      # devices are equal from the point of view of local
      # configuration
      if (match_l and "local_dev" in info and
          not match_r and "local_addr" in info):
        # strange case - the device network part points to somewhere
        # else, even though its local storage is ours; as we own the
        # drbd space, we try to disconnect from the remote peer and
        # reconnect to our correct one
        try:
          self._ShutdownNet(minor)
        except errors.BlockDeviceError, err:
          _ThrowError("drbd%d: device has correct local storage, wrong"
                      " remote peer and is unable to disconnect in order"
                      " to attach to the correct peer: %s", minor, str(err))
        # note: _AssembleNet also handles the case when we don't want
        # local storage (i.e. one or more of the _[lr](host|port) is
        # None)
        self._AssembleNet(minor, net_data, constants.DRBD_NET_PROTOCOL,
                          hmac=constants.DRBD_HMAC_ALG, secret=self._secret)
        if self._MatchesNet(self._GetDevInfo(self._GetShowData(minor))):
          break
        else:
          _ThrowError("drbd%d: network attach successful, but 'drbdsetup"
                      " show' disagrees", minor)

    else:
      minor = None

    self._SetFromMinor(minor)
    if minor is None:
      _ThrowError("drbd%d: cannot activate, unknown or unhandled reason",
                  self._aminor)
  def _FastAssemble(self):
    """Assemble the drbd device from zero.

    This is run when in Assemble we detect our minor is unused.

    """
    minor = self._aminor
    kids = self._children
    if kids and kids[0] and kids[1]:
      # we have both data and meta devices: attach local storage
      self._AssembleLocal(minor, kids[0].dev_path, kids[1].dev_path,
                          self.size)
    net_data = (self._lhost, self._lport, self._rhost, self._rport)
    if all(net_data):
      # full network information available: bring up the network side
      self._AssembleNet(minor, net_data,
                        constants.DRBD_NET_PROTOCOL,
                        hmac=constants.DRBD_HMAC_ALG, secret=self._secret)
    self._SetFromMinor(minor)
  @classmethod
  def _ShutdownLocal(cls, minor):
    """Detach from the local device.

    I/Os will continue to be served from the remote device. If we
    don't have a remote device, this operation will fail.

    """
    detach_result = utils.RunCmd(["drbdsetup", cls._DevPath(minor), "detach"])
    if detach_result.failed:
      _ThrowError("drbd%d: can't detach local disk: %s", minor,
                  detach_result.output)
  @classmethod
  def _ShutdownNet(cls, minor):
    """Disconnect from the remote peer.

    This fails if we don't have a local device.

    """
    disc_result = utils.RunCmd(["drbdsetup", cls._DevPath(minor),
                                "disconnect"])
    if disc_result.failed:
      _ThrowError("drbd%d: can't shutdown network: %s", minor,
                  disc_result.output)
  @classmethod
  def _ShutdownAll(cls, minor):
    """Deactivate the device.

    This will, of course, fail if the device is in use.

    """
    down_result = utils.RunCmd(["drbdsetup", cls._DevPath(minor), "down"])
    if down_result.failed:
      _ThrowError("drbd%d: can't shutdown drbd device: %s",
                  minor, down_result.output)
  def Shutdown(self):
    """Shutdown the DRBD device.

    Forgets our own minor and device path before actually taking the
    device down, so a failed shutdown still leaves us detached.

    """
    if self.minor is None and not self.Attach():
      logging.info("drbd%d: not attached during Shutdown()", self._aminor)
      return
    old_minor = self.minor
    self.dev_path = None
    self.minor = None
    self._ShutdownAll(old_minor)
  def Remove(self):
    """Stub remove for DRBD devices.

    DRBD devices are not created per se, just assembled, so removal
    only means shutting the device down.

    """
    self.Shutdown()
  @classmethod
  def Create(cls, unique_id, children, size, params, excl_stor):
    """Create a new DRBD8 device.

    Since DRBD devices are not created per se, just assembled, this
    function only initializes the metadata.

    @type children: list
    @param children: the two backing devices: [data, metadata]
    @type excl_stor: boolean
    @param excl_stor: whether exclusive_storage is active (unsupported
        for DRBD, raises ProgrammerError)

    """
    if len(children) != 2:
      raise errors.ProgrammerError("Invalid setup for the drbd device")
    if excl_stor:
      raise errors.ProgrammerError("DRBD device requested with"
                                   " exclusive_storage")
    # check that the minor is unused
    aminor = unique_id[4]
    proc_info = cls._MassageProcData(cls._GetProcData())
    if aminor in proc_info:
      status = DRBD8Status(proc_info[aminor])
      in_use = status.is_in_use
    else:
      in_use = False
    if in_use:
      _ThrowError("drbd%d: minor is already in use at Create() time", aminor)
    # initialize the metadata on the second child
    meta = children[1]
    meta.Assemble()
    if not meta.Attach():
      _ThrowError("drbd%d: can't attach to meta device '%s'",
                  aminor, meta)
    cls._CheckMetaSize(meta.dev_path)
    cls._InitMeta(aminor, meta.dev_path)
    return cls(unique_id, children, size, params)
  def Grow(self, amount, dryrun, backingstore):
    """Resize the DRBD device and its backing storage.

    @type amount: int
    @param amount: the amount (in MiB) to grow with
    @type dryrun: boolean
    @param dryrun: only simulate the growth on the backing storage
    @type backingstore: boolean
    @param backingstore: whether to grow only the backing storage

    """
    if self.minor is None:
      _ThrowError("drbd%d: Grow called while not attached", self._aminor)
    if len(self._children) != 2 or None in self._children:
      _ThrowError("drbd%d: cannot grow diskless device", self.minor)
    # grow the data device first; metadata size is unaffected
    self._children[0].Grow(amount, dryrun, backingstore)
    if dryrun or backingstore:
      # DRBD does not support dry-run mode and is not backing storage,
      # so we'll return here
      return
    new_size_mib = self.size + amount
    result = utils.RunCmd(["drbdsetup", self.dev_path, "resize", "-s",
                           "%dm" % new_size_mib])
    if result.failed:
      _ThrowError("drbd%d: resize failed: %s", self.minor, result.output)
    
2340

    
2341
class FileStorage(BlockDev):
2342
  """File device.
2343

2344
  This class represents the a file storage backend device.
2345

2346
  The unique_id for the file device is a (file_driver, file_path) tuple.
2347

2348
  """
2349
  def __init__(self, unique_id, children, size, params):
2350
    """Initalizes a file device backend.
2351

2352
    """
2353
    if children:
2354
      raise errors.BlockDeviceError("Invalid setup for file device")
2355
    super(FileStorage, self).__init__(unique_id, children, size, params)
2356
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
2357
      raise ValueError("Invalid configuration data %s" % str(unique_id))
2358
    self.driver = unique_id[0]
2359
    self.dev_path = unique_id[1]
2360

    
2361
    CheckFileStoragePath(self.dev_path)
2362

    
2363
    self.Attach()
2364

    
2365
  def Assemble(self):
2366
    """Assemble the device.
2367

2368
    Checks whether the file device exists, raises BlockDeviceError otherwise.
2369

2370
    """
2371
    if not os.path.exists(self.dev_path):
2372
      _ThrowError("File device '%s' does not exist" % self.dev_path)
2373

    
2374
  def Shutdown(self):
2375
    """Shutdown the device.
2376

2377
    This is a no-op for the file type, as we don't deactivate
2378
    the file on shutdown.
2379

2380
    """
2381
    pass
2382

    
2383
  def Open(self, force=False):
2384
    """Make the device ready for I/O.
2385

2386
    This is a no-op for the file type.
2387

2388
    """
2389
    pass
2390

    
2391
  def Close(self):
2392
    """Notifies that the device will no longer be used for I/O.
2393

2394
    This is a no-op for the file type.
2395

2396
    """
2397
    pass
2398

    
2399
  def Remove(self):
2400
    """Remove the file backing the block device.
2401

2402
    @rtype: boolean
2403
    @return: True if the removal was successful
2404

2405
    """
2406
    try:
2407
      os.remove(self.dev_path)
2408
    except OSError, err:
2409
      if err.errno != errno.ENOENT:
2410
        _ThrowError("Can't remove file '%s': %s", self.dev_path, err)
2411

    
2412
  def Rename(self, new_id):
2413
    """Renames the file.
2414

2415
    """
2416
    # TODO: implement rename for file-based storage
2417
    _ThrowError("Rename is not supported for file-based storage")
2418

    
2419
  def Grow(self, amount, dryrun, backingstore):
2420
    """Grow the file
2421

2422
    @param amount: the amount (in mebibytes) to grow with
2423

2424
    """
2425
    if not backingstore:
2426
      return
2427
    # Check that the file exists
2428
    self.Assemble()
2429
    current_size = self.GetActualSize()
2430
    new_size = current_size + amount * 1024 * 1024
2431
    assert new_size > current_size, "Cannot Grow with a negative amount"
2432
    # We can't really simulate the growth
2433
    if dryrun:
2434
      return
2435
    try:
2436
      f = open(self.dev_path, "a+")
2437
      f.truncate(new_size)
2438
      f.close()
2439
    except EnvironmentError, err:
2440
      _ThrowError("Error in file growth: %", str(err))
2441

    
2442
  def Attach(self):
2443
    """Attach to an existing file.
2444

2445
    Check if this file already exists.
2446

2447
    @rtype: boolean
2448
    @return: True if file exists
2449

2450
    """
2451
    self.attached = os.path.exists(self.dev_path)
2452
    return self.attached
2453

    
2454
  def GetActualSize(self):
2455
    """Return the actual disk size.
2456

2457
    @note: the device needs to be active when this is called
2458

2459
    """
2460
    assert self.attached, "BlockDevice not attached in GetActualSize()"
2461
    try:
2462
      st = os.stat(self.dev_path)
2463
      return st.st_size
2464
    except OSError, err:
2465
      _ThrowError("Can't stat %s: %s", self.dev_path, err)
2466

    
2467
  @classmethod
2468
  def Create(cls, unique_id, children, size, params, excl_stor):
2469
    """Create a new file.
2470

2471
    @param size: the size of file in MiB
2472

2473
    @rtype: L{bdev.FileStorage}
2474
    @return: an instance of FileStorage
2475

2476
    """
2477
    if excl_stor:
2478
      raise errors.ProgrammerError("FileStorage device requested with"
2479
                                   " exclusive_storage")
2480
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
2481
      raise ValueError("Invalid configuration data %s" % str(unique_id))
2482

    
2483
    dev_path = unique_id[1]
2484

    
2485
    CheckFileStoragePath(dev_path)
2486

    
2487
    try:
2488
      fd = os.open(dev_path, os.O_RDWR | os.O_CREAT | os.O_EXCL)
2489
      f = os.fdopen(fd, "w")
2490
      f.truncate(size * 1024 * 1024)
2491
      f.close()
2492
    except EnvironmentError, err:
2493
      if err.errno == errno.EEXIST:
2494
        _ThrowError("File already existing: %s", dev_path)
2495
      _ThrowError("Error in file creation: %", str(err))
2496

    
2497
    return FileStorage(unique_id, children, size, params)
2498

    
2499

    
2500
class PersistentBlockDevice(BlockDev):
  """A block device with persistent node

  May be either directly attached, or exposed through DM (e.g. dm-multipath).
  udev helpers are probably required to give persistent, human-friendly
  names.

  For the time being, pathnames are required to lie under /dev.

  """
  def __init__(self, unique_id, children, size, params):
    """Attaches to a static block device.

    The unique_id is a path under /dev.

    """
    super(PersistentBlockDevice, self).__init__(unique_id, children, size,
                                                params)
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
      raise ValueError("Invalid configuration data %s" % str(unique_id))
    self.dev_path = unique_id[1]
    # Resolve symlinks once and insist that the target lives under /dev.
    real_path = os.path.realpath(self.dev_path)
    if not real_path.startswith("/dev/"):
      raise ValueError("Full path '%s' lies outside /dev" % real_path)
    # TODO: this is just a safety guard checking that we only deal with devices
    # we know how to handle. In the future this will be integrated with
    # external storage backends and possible values will probably be collected
    # from the cluster configuration.
    if unique_id[0] != constants.BLOCKDEV_DRIVER_MANUAL:
      raise ValueError("Got persistent block device of invalid type: %s" %
                       unique_id[0])

    self.major = self.minor = None
    self.Attach()

  @classmethod
  def Create(cls, unique_id, children, size, params, excl_stor):
    """Create a new device

    This is a noop, we only return a PersistentBlockDevice instance

    """
    if excl_stor:
      raise errors.ProgrammerError("Persistent block device requested with"
                                   " exclusive_storage")
    return PersistentBlockDevice(unique_id, children, 0, params)

  def Remove(self):
    """Remove a device

    This is a noop

    """

  def Rename(self, new_id):
    """Rename this device.

    """
    _ThrowError("Rename is not supported for PersistentBlockDev storage")

  def Attach(self):
    """Attach to an existing block device.

    """
    self.attached = False
    try:
      dev_stat = os.stat(self.dev_path)
    except OSError as err:
      logging.error("Error stat()'ing %s: %s", self.dev_path, str(err))
      return False

    if not stat.S_ISBLK(dev_stat.st_mode):
      logging.error("%s is not a block device", self.dev_path)
      return False

    # Record the device numbers so callers can identify the node.
    self.major = os.major(dev_stat.st_rdev)
    self.minor = os.minor(dev_stat.st_rdev)
    self.attached = True

    return True

  def Assemble(self):
    """Assemble the device.

    """

  def Shutdown(self):
    """Shutdown the device.

    """

  def Open(self, force=False):
    """Make the device ready for I/O.

    """

  def Close(self):
    """Notifies that the device will no longer be used for I/O.

    """

  def Grow(self, amount, dryrun, backingstore):
    """Grow the logical volume.

    """
    _ThrowError("Grow is not supported for PersistentBlockDev storage")
class RADOSBlockDevice(BlockDev):
  """A RADOS Block Device (rbd).

  This class implements the RADOS Block Device for the backend. You need
  the rbd kernel driver, the RADOS Tools and a working RADOS cluster for
  this to be functional.

  """
  def __init__(self, unique_id, children, size, params):
    """Attaches to an rbd device.

    """
    super(RADOSBlockDevice, self).__init__(unique_id, children, size, params)
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
      raise ValueError("Invalid configuration data %s" % str(unique_id))

    self.driver, self.rbd_name = unique_id

    self.major = self.minor = None
    self.Attach()

  @classmethod
  def Create(cls, unique_id, children, size, params, excl_stor):
    """Create a new rbd device.

    Provision a new rbd volume inside a RADOS pool.

    """
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
      raise errors.ProgrammerError("Invalid configuration data %s" %
                                   str(unique_id))
    if excl_stor:
      raise errors.ProgrammerError("RBD device requested with"
                                   " exclusive_storage")
    rbd_pool = params[constants.LDP_POOL]
    rbd_name = unique_id[1]

    # Provision a new rbd volume (Image) inside the RADOS cluster.
    create_cmd = [constants.RBD_CMD, "create", "-p", rbd_pool,
                  rbd_name, "--size", "%s" % size]
    result = utils.RunCmd(create_cmd)
    if result.failed:
      _ThrowError("rbd creation failed (%s): %s",
                  result.fail_reason, result.output)

    return RADOSBlockDevice(unique_id, children, size, params)

  def Remove(self):
    """Remove the rbd device.

    """
    rbd_pool = self.params[constants.LDP_POOL]
    rbd_name = self.unique_id[1]

    if not self.minor and not self.Attach():
      # The rbd device doesn't exist.
      return

    # First shutdown the device (remove mappings).
    self.Shutdown()

    # Remove the actual Volume (Image) from the RADOS cluster.
    result = utils.RunCmd([constants.RBD_CMD, "rm", "-p", rbd_pool, rbd_name])
    if result.failed:
      _ThrowError("Can't remove Volume from cluster with rbd rm: %s - %s",
                  result.fail_reason, result.output)

  def Rename(self, new_id):
    """Rename this device.

    """

  def Attach(self):
    """Attach to an existing rbd device.

    This method maps the rbd volume that matches our name with
    an rbd device and then attaches to this device.

    """
    self.attached = False

    # Map the rbd volume to a block device under /dev
    self.dev_path = self._MapVolumeToBlockdev(self.unique_id)

    try:
      dev_stat = os.stat(self.dev_path)
    except OSError as err:
      logging.error("Error stat()'ing %s: %s", self.dev_path, str(err))
      return False

    if not stat.S_ISBLK(dev_stat.st_mode):
      logging.error("%s is not a block device", self.dev_path)
      return False

    self.major = os.major(dev_stat.st_rdev)
    self.minor = os.minor(dev_stat.st_rdev)
    self.attached = True

    return True

  def _MapVolumeToBlockdev(self, unique_id):
    """Maps existing rbd volumes to block devices.

    This method should be idempotent if the mapping already exists.

    @rtype: string
    @return: the block device path that corresponds to the volume

    """
    pool = self.params[constants.LDP_POOL]
    name = unique_id[1]

    # Check if the mapping already exists.
    showmap_cmd = [constants.RBD_CMD, "showmapped", "-p", pool]
    result = utils.RunCmd(showmap_cmd)
    if result.failed:
      _ThrowError("rbd showmapped failed (%s): %s",
                  result.fail_reason, result.output)

    rbd_dev = self._ParseRbdShowmappedOutput(result.output, name)
    if rbd_dev:
      # The mapping exists. Return it.
      return rbd_dev

    # The mapping doesn't exist. Create it.
    result = utils.RunCmd([constants.RBD_CMD, "map", "-p", pool, name])
    if result.failed:
      _ThrowError("rbd map failed (%s): %s",
                  result.fail_reason, result.output)

    # Find the corresponding rbd device.
    result = utils.RunCmd(showmap_cmd)
    if result.failed:
      _ThrowError("rbd map succeeded, but showmapped failed (%s): %s",
                  result.fail_reason, result.output)

    rbd_dev = self._ParseRbdShowmappedOutput(result.output, name)
    if not rbd_dev:
      _ThrowError("rbd map succeeded, but could not find the rbd block"
                  " device in output of showmapped, for volume: %s", name)

    # The device was successfully mapped. Return it.
    return rbd_dev

  @staticmethod
  def _ParseRbdShowmappedOutput(output, volume_name):
    """Parse the output of `rbd showmapped'.

    This method parses the output of `rbd showmapped' and returns
    the rbd block device path (e.g. /dev/rbd0) that matches the
    given rbd volume.

    @type output: string
    @param output: the whole output of `rbd showmapped'
    @type volume_name: string
    @param volume_name: the name of the volume whose device we search for
    @rtype: string or None
    @return: block device path if the volume is mapped, else None

    """
    allfields = 5
    volumefield = 2
    devicefield = 4

    field_sep = "\t"

    splitted_lines = [line.split(field_sep)
                      for line in output.splitlines()]

    # Check empty output.
    if not splitted_lines:
      _ThrowError("rbd showmapped returned empty output")

    # Check showmapped header line, to determine number of fields.
    field_cnt = len(splitted_lines[0])
    if field_cnt != allfields:
      _ThrowError("Cannot parse rbd showmapped output because its format"
                  " seems to have changed; expected %s fields, found %s",
                  allfields, field_cnt)

    matched_lines = [fields for fields in splitted_lines
                     if len(fields) == allfields and
                     fields[volumefield] == volume_name]

    if len(matched_lines) > 1:
      _ThrowError("The rbd volume %s is mapped more than once."
                  " This shouldn't happen, try to unmap the extra"
                  " devices manually.", volume_name)

    if matched_lines:
      # rbd block device found. Return it.
      return matched_lines[0][devicefield]

    # The given volume is not mapped.
    return None

  def Assemble(self):
    """Assemble the device.

    """

  def Shutdown(self):
    """Shutdown the device.

    """
    if not self.minor and not self.Attach():
      # The rbd device doesn't exist.
      return

    # Unmap the block device from the Volume.
    self._UnmapVolumeFromBlockdev(self.unique_id)

    self.minor = None
    self.dev_path = None

  def _UnmapVolumeFromBlockdev(self, unique_id):
    """Unmaps the rbd device from the Volume it is mapped.

    Unmaps the rbd device from the Volume it was previously mapped to.
    This method should be idempotent if the Volume isn't mapped.

    """
    pool = self.params[constants.LDP_POOL]
    name = unique_id[1]

    # Check if the mapping already exists.
    result = utils.RunCmd([constants.RBD_CMD, "showmapped", "-p", pool])
    if result.failed:
      _ThrowError("rbd showmapped failed [during unmap](%s): %s",
                  result.fail_reason, result.output)

    rbd_dev = self._ParseRbdShowmappedOutput(result.output, name)
    if rbd_dev:
      # The mapping exists. Unmap the rbd device.
      result = utils.RunCmd([constants.RBD_CMD, "unmap", "%s" % rbd_dev])
      if result.failed:
        _ThrowError("rbd unmap failed (%s): %s",
                    result.fail_reason, result.output)

  def Open(self, force=False):
    """Make the device ready for I/O.

    """

  def Close(self):
    """Notifies that the device will no longer be used for I/O.

    """

  def Grow(self, amount, dryrun, backingstore):
    """Grow the Volume.

    @type amount: integer
    @param amount: the amount (in mebibytes) to grow with
    @type dryrun: boolean
    @param dryrun: whether to execute the operation in simulation mode
        only, without actually increasing the size

    """
    if not backingstore:
      return
    if not self.Attach():
      _ThrowError("Can't attach to rbd device during Grow()")

    if dryrun:
      # the rbd tool does not support dry runs of resize operations.
      # Since rbd volumes are thinly provisioned, we assume
      # there is always enough free space for the operation.
      return

    rbd_pool = self.params[constants.LDP_POOL]
    rbd_name = self.unique_id[1]
    new_size = self.size + amount

    # Resize the rbd volume (Image) inside the RADOS cluster.
    resize_cmd = [constants.RBD_CMD, "resize", "-p", rbd_pool,
                  rbd_name, "--size", "%s" % new_size]
    result = utils.RunCmd(resize_cmd)
    if result.failed:
      _ThrowError("rbd resize failed (%s): %s",
                  result.fail_reason, result.output)
class ExtStorageDevice(BlockDev):
  """A block device provided by an ExtStorage Provider.

  This class implements the External Storage Interface, which means
  handling of the externally provided block devices.

  """
  def __init__(self, unique_id, children, size, params):
    """Attaches to an extstorage block device.

    """
    super(ExtStorageDevice, self).__init__(unique_id, children, size, params)
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
      raise ValueError("Invalid configuration data %s" % str(unique_id))

    self.driver, self.vol_name = unique_id
    self.ext_params = params

    self.major = self.minor = None
    self.Attach()

  @classmethod
  def Create(cls, unique_id, children, size, params, excl_stor):
    """Create a new extstorage device.

    Provision a new volume using an extstorage provider, which will
    then be mapped to a block device.

    """
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
      raise errors.ProgrammerError("Invalid configuration data %s" %
                                   str(unique_id))
    if excl_stor:
      raise errors.ProgrammerError("extstorage device requested with"
                                   " exclusive_storage")

    # Call the External Storage's create script,
    # to provision a new Volume inside the External Storage
    _ExtStorageAction(constants.ES_ACTION_CREATE, unique_id,
                      params, str(size))

    return ExtStorageDevice(unique_id, children, size, params)

  def Remove(self):
    """Remove the extstorage device.

    """
    if not self.minor and not self.Attach():
      # The extstorage device doesn't exist.
      return

    # First shutdown the device (remove mappings).
    self.Shutdown()

    # Call the External Storage's remove script,
    # to remove the Volume from the External Storage
    _ExtStorageAction(constants.ES_ACTION_REMOVE, self.unique_id,
                      self.ext_params)

  def Rename(self, new_id):
    """Rename this device.

    """

  def Attach(self):
    """Attach to an existing extstorage device.

    This method maps the extstorage volume that matches our name with
    a corresponding block device and then attaches to this device.

    """
    self.attached = False

    # Call the External Storage's attach script,
    # to attach an existing Volume to a block device under /dev
    self.dev_path = _ExtStorageAction(constants.ES_ACTION_ATTACH,
                                      self.unique_id, self.ext_params)

    try:
      dev_stat = os.stat(self.dev_path)
    except OSError as err:
      logging.error("Error stat()'ing %s: %s", self.dev_path, str(err))
      return False

    if not stat.S_ISBLK(dev_stat.st_mode):
      logging.error("%s is not a block device", self.dev_path)
      return False

    self.major = os.major(dev_stat.st_rdev)
    self.minor = os.minor(dev_stat.st_rdev)
    self.attached = True

    return True

  def Assemble(self):
    """Assemble the device.

    """

  def Shutdown(self):
    """Shutdown the device.

    """
    if not self.minor and not self.Attach():
      # The extstorage device doesn't exist.
      return

    # Call the External Storage's detach script,
    # to detach an existing Volume from it's block device under /dev
    _ExtStorageAction(constants.ES_ACTION_DETACH, self.unique_id,
                      self.ext_params)

    self.minor = None
    self.dev_path = None

  def Open(self, force=False):
    """Make the device ready for I/O.

    """

  def Close(self):
    """Notifies that the device will no longer be used for I/O.

    """

  def Grow(self, amount, dryrun, backingstore):
    """Grow the Volume.

    @type amount: integer
    @param amount: the amount (in mebibytes) to grow with
    @type dryrun: boolean
    @param dryrun: whether to execute the operation in simulation mode
        only, without actually increasing the size

    """
    if not backingstore:
      return
    if not self.Attach():
      _ThrowError("Can't attach to extstorage device during Grow()")

    if dryrun:
      # we do not support dry runs of resize operations for now.
      return

    new_size = self.size + amount

    # Call the External Storage's grow script,
    # to grow an existing Volume inside the External Storage
    _ExtStorageAction(constants.ES_ACTION_GROW, self.unique_id,
                      self.ext_params, str(self.size), grow=str(new_size))

  def SetInfo(self, text):
    """Update metadata with info text.

    """
    # Replace invalid characters
    text = re.sub("^[^A-Za-z0-9_+.]", "_", text)
    text = re.sub("[^-A-Za-z0-9_+.]", "_", text)

    # Only up to 128 characters are allowed
    text = text[:128]

    # Call the External Storage's setinfo script,
    # to set metadata for an existing Volume inside the External Storage
    _ExtStorageAction(constants.ES_ACTION_SETINFO, self.unique_id,
                      self.ext_params, metadata=text)
def _ExtStorageAction(action, unique_id, ext_params,
                      size=None, grow=None, metadata=None):
  """Take an External Storage action.

  Take an External Storage action concerning or affecting
  a specific Volume inside the External Storage.

  @type action: string
  @param action: which action to perform. One of:
                 create / remove / grow / attach / detach
  @type unique_id: tuple (driver, vol_name)
  @param unique_id: a tuple containing the type of ExtStorage (driver)
                    and the Volume name
  @type ext_params: dict
  @param ext_params: ExtStorage parameters
  @type size: integer
  @param size: the size of the Volume in mebibytes
  @type grow: integer
  @param grow: the new size in mebibytes (after grow)
  @type metadata: string
  @param metadata: metadata info of the Volume, for use by the provider
  @rtype: None or a block device path (during attach)

  """
  driver, vol_name = unique_id

  # Create an External Storage instance of type `driver'
  status, inst_es = ExtStorageFromDisk(driver)
  if not status:
    _ThrowError("%s" % inst_es)

  # Create the basic environment for the driver's scripts
  create_env = _ExtStorageEnvironment(unique_id, ext_params, size,
                                      grow, metadata)

  # Do not use log file for action `attach' as we need
  # to get the output from RunResult
  # TODO: find a way to have a log file for attach too
  logfile = None
  # FIX: compare strings by value; the previous `is not' identity test only
  # worked because the constants happen to be the same interned objects.
  if action != constants.ES_ACTION_ATTACH:
    logfile = _VolumeLogName(action, driver, vol_name)

  # Make sure the given action results in a valid script
  if action not in constants.ES_SCRIPTS:
    _ThrowError("Action '%s' doesn't result in a valid ExtStorage script" %
                action)

  # Find out which external script to run according the given action
  script_name = action + "_script"
  script = getattr(inst_es, script_name)

  # Run the external script
  result = utils.RunCmd([script], env=create_env,
                        cwd=inst_es.path, output=logfile,)
  if result.failed:
    logging.error("External storage's %s command '%s' returned"
                  " error: %s, logfile: %s, output: %s",
                  action, result.cmd, result.fail_reason,
                  logfile, result.output)

    # If logfile is 'None' (during attach), it breaks TailFile
    # TODO: have a log file for attach too
    if action != constants.ES_ACTION_ATTACH:
      lines = [utils.SafeEncode(val)
               for val in utils.TailFile(logfile, lines=20)]
    else:
      lines = result.output[-20:]

    _ThrowError("External storage's %s script failed (%s), last"
                " lines of output:\n%s",
                action, result.fail_reason, "\n".join(lines))

  if action == constants.ES_ACTION_ATTACH:
    return result.stdout
def ExtStorageFromDisk(name, base_dir=None):
  """Create an ExtStorage instance from disk.

  This function will return an ExtStorage instance
  if the given name is a valid ExtStorage name.

  @type base_dir: string
  @keyword base_dir: Base directory containing ExtStorage installations.
                     Defaults to a search in all the ES_SEARCH_PATH dirs.
  @rtype: tuple
  @return: True and the ExtStorage instance if we find a valid one, or
      False and the diagnose message on error

  """
  if base_dir is None:
    search_dirs = pathutils.ES_SEARCH_PATH
  else:
    search_dirs = [base_dir]

  es_dir = utils.FindFile(name, search_dirs, os.path.isdir)
  if es_dir is None:
    return False, ("Directory for External Storage Provider %s not"
                   " found in search path" % name)

  # ES Files dictionary, we will populate it with the absolute path
  # names; if the value is True, then it is a required file, otherwise
  # an optional one
  es_files = dict.fromkeys(constants.ES_SCRIPTS, True)
  es_files[constants.ES_PARAMETERS_FILE] = True

  for filename in list(es_files):
    es_files[filename] = utils.PathJoin(es_dir, filename)

    try:
      st = os.stat(es_files[filename])
    except EnvironmentError as err:
      return False, ("File '%s' under path '%s' is missing (%s)" %
                     (filename, es_dir, utils.ErrnoOrStr(err)))

    if not stat.S_ISREG(stat.S_IFMT(st.st_mode)):
      return False, ("File '%s' under path '%s' is not a regular file" %
                     (filename, es_dir))

    # Scripts additionally have to be executable by their owner.
    if filename in constants.ES_SCRIPTS:
      if stat.S_IMODE(st.st_mode) & stat.S_IXUSR != stat.S_IXUSR:
        return False, ("File '%s' under path '%s' is not executable" %
                       (filename, es_dir))

  parameters = []
  if constants.ES_PARAMETERS_FILE in es_files:
    parameters_file = es_files[constants.ES_PARAMETERS_FILE]
    try:
      parameters = utils.ReadFile(parameters_file).splitlines()
    except EnvironmentError as err:
      return False, ("Error while reading the EXT parameters file at %s: %s" %
                     (parameters_file, utils.ErrnoOrStr(err)))
    # Each line is "<name> <description>"; keep them as pairs.
    parameters = [v.split(None, 1) for v in parameters]

  es_obj = objects.ExtStorage(
    name=name, path=es_dir,
    create_script=es_files[constants.ES_SCRIPT_CREATE],
    remove_script=es_files[constants.ES_SCRIPT_REMOVE],
    grow_script=es_files[constants.ES_SCRIPT_GROW],
    attach_script=es_files[constants.ES_SCRIPT_ATTACH],
    detach_script=es_files[constants.ES_SCRIPT_DETACH],
    setinfo_script=es_files[constants.ES_SCRIPT_SETINFO],
    verify_script=es_files[constants.ES_SCRIPT_VERIFY],
    supported_parameters=parameters)
  return True, es_obj
def _ExtStorageEnvironment(unique_id, ext_params,
3232
                           size=None, grow=None, metadata=None):
3233
  """Calculate the environment for an External Storage script.
3234

3235
  @type unique_id: tuple (driver, vol_name)
3236
  @param unique_id: ExtStorage pool and name of the Volume
3237
  @type ext_params: dict
3238
  @param ext_params: the EXT parameters
3239
  @type size: string
3240
  @param size: size of the Volume (in mebibytes)
3241
  @type grow: string
3242
  @param grow: new size of Volume after grow (in mebibytes)
3243
  @type metadata: string
3244
  @param metadata: metadata info of the Volume
3245
  @rtype: dict
3246
  @return: dict of environment variables
3247

3248
  """
3249
  vol_name = unique_id[1]
3250

    
3251
  result = {}
3252
  result["VOL_NAME"] = vol_name
3253

    
3254
  # EXT params
3255
  for pname, pvalue in ext_params.items():
3256
    result["EXTP_%s" % pname.upper()] = str(pvalue)
3257

    
3258
  if size is not None:
3259
    result["VOL_SIZE"] = size
3260

    
3261
  if grow is not None:
3262
    result["VOL_NEW_SIZE"] = grow
3263

    
3264
  if metadata is not None:
3265
    result["VOL_METADATA"] = metadata
3266

    
3267
  return result
3268

    
3269

    
3270
def _VolumeLogName(kind, es_name, volume):
  """Compute the ExtStorage log filename for a given Volume and operation.

  @type kind: string
  @param kind: the operation type (e.g. create, remove etc.)
  @type es_name: string
  @param es_name: the ExtStorage name
  @type volume: string
  @param volume: the name of the Volume inside the External Storage

  """
  # Refuse to produce a log path if the log directory is absent.
  if not os.path.isdir(pathutils.LOG_ES_DIR):
    _ThrowError("Cannot find log directory: %s", pathutils.LOG_ES_DIR)

  # TODO: Use tempfile.mkstemp to create unique filename
  basename = ("%s-%s-%s-%s.log" %
              (kind, es_name, volume, utils.TimestampForFilename()))
  return utils.PathJoin(pathutils.LOG_ES_DIR, basename)
# Mapping of logical disk types to the classes implementing them.
DEV_MAP = {
  constants.LD_LV: LogicalVolume,
  constants.LD_DRBD8: DRBD8,
  constants.LD_BLOCKDEV: PersistentBlockDevice,
  constants.LD_RBD: RADOSBlockDevice,
  constants.LD_EXT: ExtStorageDevice,
  }

# File-based storage is only supported when enabled at configure time.
if constants.ENABLE_FILE_STORAGE or constants.ENABLE_SHARED_FILE_STORAGE:
  DEV_MAP[constants.LD_FILE] = FileStorage
def _VerifyDiskType(dev_type):
  """Check that a disk type is known, raising ProgrammerError otherwise.

  """
  if dev_type not in DEV_MAP:
    raise errors.ProgrammerError("Invalid block device type '%s'" % dev_type)
def _VerifyDiskParams(disk):
  """Verifies if all disk parameters are set.

  """
  required = constants.DISK_LD_DEFAULTS[disk.dev_type]
  missing = set(required) - set(disk.params)
  if missing:
    raise errors.ProgrammerError("Block device is missing disk parameters: %s" %
                                 missing)
def FindDevice(disk, children):
  """Search for an existing, assembled device.

  This will succeed only if the device exists and is assembled, but it
  does not do any actions in order to activate the device.

  @type disk: L{objects.Disk}
  @param disk: the disk object to find
  @type children: list of L{bdev.BlockDev}
  @param children: the list of block devices that are children of the device
                  represented by the disk parameter

  """
  _VerifyDiskType(disk.dev_type)
  dev_class = DEV_MAP[disk.dev_type]
  device = dev_class(disk.physical_id, children, disk.size, disk.params)
  # Instantiating the class attempts an attach; only report success.
  if device.attached:
    return device
  return None
def Assemble(disk, children):
  """Try to attach or assemble an existing device.

  This will attach to assemble the device, as needed, to bring it
  fully up. It must be safe to run on already-assembled devices.

  @type disk: L{objects.Disk}
  @param disk: the disk object to assemble
  @type children: list of L{bdev.BlockDev}
  @param children: the list of block devices that are children of the device
                  represented by the disk parameter

  """
  _VerifyDiskType(disk.dev_type)
  _VerifyDiskParams(disk)
  dev_class = DEV_MAP[disk.dev_type]
  device = dev_class(disk.physical_id, children, disk.size, disk.params)
  device.Assemble()
  return device
def Create(disk, children, excl_stor):
  """Create a device.

  @type disk: L{objects.Disk}
  @param disk: the disk object to create
  @type children: list of L{bdev.BlockDev}
  @param children: the list of block devices that are children of the device
                  represented by the disk parameter
  @type excl_stor: boolean
  @param excl_stor: Whether exclusive_storage is active

  """
  _VerifyDiskType(disk.dev_type)
  _VerifyDiskParams(disk)
  return DEV_MAP[disk.dev_type].Create(disk.physical_id, children, disk.size,
                                       disk.params, excl_stor)