1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2010, 2011, 2012 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Block device abstraction"""
23

    
24
import re
25
import time
26
import errno
27
import shlex
28
import stat
29
import pyparsing as pyp
30
import os
31
import logging
32

    
33
from ganeti import utils
34
from ganeti import errors
35
from ganeti import constants
36
from ganeti import objects
37
from ganeti import compat
38
from ganeti import netutils
39
from ganeti import pathutils
40

    
41

    
42
# Size of reads in _CanReadDevice
43
_DEVICE_READ_SIZE = 128 * 1024
44

    
45

    
46
def _IgnoreError(fn, *args, **kwargs):
47
  """Executes the given function, ignoring BlockDeviceErrors.
48

49
  This is used in order to simplify the execution of cleanup or
50
  rollback functions.
51

52
  @rtype: boolean
53
  @return: True when fn didn't raise an exception, False otherwise
54

55
  """
56
  try:
57
    fn(*args, **kwargs)
58
    return True
59
  except errors.BlockDeviceError, err:
60
    logging.warning("Caught BlockDeviceError but ignoring: %s", str(err))
61
    return False
62
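
# Illustrative sketch of how _IgnoreError is meant to be used for best-effort
# cleanup (the names and values below are hypothetical, not part of this
# module's API contract):
#   snap = LogicalVolume(("xenvg", "inst1.disk0.snap"), None, 1024, params)
#   _IgnoreError(snap.Remove)   # a failure here is logged but not raised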

    
63

    
64
def _ThrowError(msg, *args):
65
  """Log an error to the node daemon and the raise an exception.
66

67
  @type msg: string
68
  @param msg: the text of the exception
69
  @raise errors.BlockDeviceError
70

71
  """
72
  if args:
73
    msg = msg % args
74
  logging.error(msg)
75
  raise errors.BlockDeviceError(msg)
76

    
77

    
78
def _CheckResult(result):
79
  """Throws an error if the given result is a failed one.
80

81
  @param result: result from RunCmd
82

83
  """
84
  if result.failed:
85
    _ThrowError("Command: %s error: %s - %s", result.cmd, result.fail_reason,
86
                result.output)
87

    
88

    
89
def _CanReadDevice(path):
90
  """Check if we can read from the given device.
91

92
  This tries to read the first 128k of the device.
93

94
  """
95
  try:
96
    utils.ReadFile(path, size=_DEVICE_READ_SIZE)
97
    return True
98
  except EnvironmentError:
99
    logging.warning("Can't read from device %s", path, exc_info=True)
100
    return False
101

    
102

    
103
def _GetForbiddenFileStoragePaths():
104
  """Builds a list of path prefixes which shouldn't be used for file storage.
105

106
  @rtype: frozenset
107

108
  """
109
  paths = set([
110
    "/boot",
111
    "/dev",
112
    "/etc",
113
    "/home",
114
    "/proc",
115
    "/root",
116
    "/sys",
117
    ])
118

    
119
  for prefix in ["", "/usr", "/usr/local"]:
120
    paths.update(map(lambda s: "%s/%s" % (prefix, s),
121
                     ["bin", "lib", "lib32", "lib64", "sbin"]))
122

    
123
  return compat.UniqueFrozenset(map(os.path.normpath, paths))
124

    
125

    
126
def _ComputeWrongFileStoragePaths(paths,
127
                                  _forbidden=_GetForbiddenFileStoragePaths()):
128
  """Cross-checks a list of paths for prefixes considered bad.
129

130
  Some paths, e.g. "/bin", should not be used for file storage.
131

132
  @type paths: list
133
  @param paths: List of paths to be checked
134
  @rtype: list
135
  @return: Sorted list of paths for which the user should be warned
136

137
  """
138
  def _Check(path):
139
    return (not os.path.isabs(path) or
140
            path in _forbidden or
141
            filter(lambda p: utils.IsBelowDir(p, path), _forbidden))
142

    
143
  return utils.NiceSort(filter(_Check, map(os.path.normpath, paths)))
144

    
145

    
146
def ComputeWrongFileStoragePaths(_filename=pathutils.FILE_STORAGE_PATHS_FILE):
147
  """Returns a list of file storage paths whose prefix is considered bad.
148

149
  See L{_ComputeWrongFileStoragePaths}.
150

151
  """
152
  return _ComputeWrongFileStoragePaths(_LoadAllowedFileStoragePaths(_filename))
153

    
154

    
155
def _CheckFileStoragePath(path, allowed):
156
  """Checks if a path is in a list of allowed paths for file storage.
157

158
  @type path: string
159
  @param path: Path to check
160
  @type allowed: list
161
  @param allowed: List of allowed paths
162
  @raise errors.FileStoragePathError: If the path is not allowed
163

164
  """
165
  if not os.path.isabs(path):
166
    raise errors.FileStoragePathError("File storage path must be absolute,"
167
                                      " got '%s'" % path)
168

    
169
  for i in allowed:
170
    if not os.path.isabs(i):
171
      logging.info("Ignoring relative path '%s' for file storage", i)
172
      continue
173

    
174
    if utils.IsBelowDir(i, path):
175
      break
176
  else:
177
    raise errors.FileStoragePathError("Path '%s' is not acceptable for file"
178
                                      " storage" % path)
179

    
180

    
181
def _LoadAllowedFileStoragePaths(filename):
182
  """Loads file containing allowed file storage paths.
183

184
  @rtype: list
185
  @return: List of allowed paths (can be an empty list)
186

187
  """
188
  try:
189
    contents = utils.ReadFile(filename)
190
  except EnvironmentError:
191
    return []
192
  else:
193
    return utils.FilterEmptyLinesAndComments(contents)
194

    
195

    
196
def CheckFileStoragePath(path, _filename=pathutils.FILE_STORAGE_PATHS_FILE):
197
  """Checks if a path is allowed for file storage.
198

199
  @type path: string
200
  @param path: Path to check
201
  @raise errors.FileStoragePathError: If the path is not allowed
202

203
  """
204
  allowed = _LoadAllowedFileStoragePaths(_filename)
205

    
206
  if _ComputeWrongFileStoragePaths([path]):
207
    raise errors.FileStoragePathError("Path '%s' uses a forbidden prefix" %
208
                                      path)
209

    
210
  _CheckFileStoragePath(path, allowed)
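
# Illustrative sketch (paths are assumptions): with the default forbidden
# prefixes above,
#   _ComputeWrongFileStoragePaths(["/srv/ganeti/file-storage", "/bin/foo"])
# returns ["/bin/foo"]; CheckFileStoragePath() additionally requires the path
# to lie below one of the directories listed in FILE_STORAGE_PATHS_FILE.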
211

    
212

    
213
class BlockDev(object):
214
  """Block device abstract class.
215

216
  A block device can be in the following states:
217
    - not existing on the system, and by `Create()` it goes into:
218
    - existing but not setup/not active, and by `Assemble()` goes into:
219
    - active read-write and by `Open()` it goes into
220
    - online (=used, or ready for use)
221

222
  A device can also be online but read-only, however we are not using
223
  the readonly state (LV has it, if needed in the future) and we are
224
  usually looking at this like at a stack, so it's easier to
225
  conceptualise the transition from not-existing to online and back
226
  like a linear one.
227

228
  The many different states of the device are due to the fact that we
229
  need to cover many device types:
230
    - logical volumes are created, lvchange -a y $lv, and used
231
    - drbd devices are attached to a local disk/remote peer and made primary
232

233
  A block device is identified by three items:
234
    - the /dev path of the device (dynamic)
235
    - a unique ID of the device (static)
236
    - its major/minor pair (dynamic)
237

238
  Not all devices implement both the first two as distinct items. LVM
239
  logical volumes have their unique ID (the pair volume group, logical
240
  volume name) in a 1-to-1 relation to the dev path. For DRBD devices,
241
  the /dev path is again dynamic and the unique id is the pair (host1,
242
  dev1), (host2, dev2).
243

244
  You can get to a device in two ways:
245
    - creating the (real) device, which returns you
246
      an attached instance (lvcreate)
247
    - attaching of a python instance to an existing (real) device
248

249
  The second point, the attachment to a device, is different
250
  depending on whether the device is assembled or not. At init() time,
251
  we search for a device with the same unique_id as us. If found,
252
  good. It also means that the device is already assembled. If not,
253
  after assembly we'll have our correct major/minor.
254

255
  """
256
  def __init__(self, unique_id, children, size, params):
257
    self._children = children
258
    self.dev_path = None
259
    self.unique_id = unique_id
260
    self.major = None
261
    self.minor = None
262
    self.attached = False
263
    self.size = size
264
    self.params = params
265

    
266
  def Assemble(self):
267
    """Assemble the device from its components.
268

269
    Implementations of this method by child classes must ensure that:
270
      - after the device has been assembled, it knows its major/minor
271
        numbers; this allows other devices (usually parents) to probe
272
        correctly for their children
273
      - calling this method on an existing, in-use device is safe
274
      - if the device is already configured (and in an OK state),
275
        this method is idempotent
276

277
    """
278
    pass
279

    
280
  def Attach(self):
281
    """Find a device which matches our config and attach to it.
282

283
    """
284
    raise NotImplementedError
285

    
286
  def Close(self):
287
    """Notifies that the device will no longer be used for I/O.
288

289
    """
290
    raise NotImplementedError
291

    
292
  @classmethod
293
  def Create(cls, unique_id, children, size, params):
294
    """Create the device.
295

296
    If the device cannot be created, it will return None
297
    instead. Error messages go to the logging system.
298

299
    Note that for some devices, the unique_id is used, and for other,
300
    the children. The idea is that these two, taken together, are
301
    enough for both creation and assembly (later).
302

303
    """
304
    raise NotImplementedError
305

    
306
  def Remove(self):
307
    """Remove this device.
308

309
    This makes sense only for some of the device types: LV and file
310
    storage. Also note that if the device can't attach, the removal
311
    can't be completed.
312

313
    """
314
    raise NotImplementedError
315

    
316
  def Rename(self, new_id):
317
    """Rename this device.
318

319
    This may or may not make sense for a given device type.
320

321
    """
322
    raise NotImplementedError
323

    
324
  def Open(self, force=False):
325
    """Make the device ready for use.
326

327
    This makes the device ready for I/O. For now, just the DRBD
328
    devices need this.
329

330
    The force parameter signifies that if the device has any kind of
331
    --force thing, it should be used, we know what we are doing.
332

333
    """
334
    raise NotImplementedError
335

    
336
  def Shutdown(self):
337
    """Shut down the device, freeing its children.
338

339
    This undoes the `Assemble()` work, except for the child
340
    assembling; as such, the children on the device are still
341
    assembled after this call.
342

343
    """
344
    raise NotImplementedError
345

    
346
  def SetSyncParams(self, params):
347
    """Adjust the synchronization parameters of the mirror.
348

349
    In case this is not a mirroring device, this is a no-op.
350

351
    @param params: dictionary of LD level disk parameters related to the
352
    synchronization.
353
    @rtype: list
354
    @return: a list of error messages, emitted both by the current node and by
355
    children. An empty list means no errors.
356

357
    """
358
    result = []
359
    if self._children:
360
      for child in self._children:
361
        result.extend(child.SetSyncParams(params))
362
    return result
363

    
364
  def PauseResumeSync(self, pause):
365
    """Pause/Resume the sync of the mirror.
366

367
    In case this is not a mirroring device, this is a no-op.
368

369
    @param pause: Whether to pause or resume
370

371
    """
372
    result = True
373
    if self._children:
374
      for child in self._children:
375
        result = result and child.PauseResumeSync(pause)
376
    return result
377

    
378
  def GetSyncStatus(self):
379
    """Returns the sync status of the device.
380

381
    If this device is a mirroring device, this function returns the
382
    status of the mirror.
383

384
    If sync_percent is None, it means the device is not syncing.
385

386
    If estimated_time is None, it means we can't estimate
387
    the time needed, otherwise it's the time left in seconds.
388

389
    If is_degraded is True, it means the device is missing
390
    redundancy. This is usually a sign that something went wrong in
391
    the device setup, if sync_percent is None.
392

393
    The ldisk parameter represents the degradation of the local
394
    data. This is only valid for some devices, the rest will always
395
    return False (not degraded).
396

397
    @rtype: objects.BlockDevStatus
398

399
    """
400
    return objects.BlockDevStatus(dev_path=self.dev_path,
401
                                  major=self.major,
402
                                  minor=self.minor,
403
                                  sync_percent=None,
404
                                  estimated_time=None,
405
                                  is_degraded=False,
406
                                  ldisk_status=constants.LDS_OKAY)
407

    
408
  def CombinedSyncStatus(self):
409
    """Calculate the mirror status recursively for our children.
410

411
    The return value is the same as for `GetSyncStatus()` except the
412
    minimum percent and maximum time are calculated across our
413
    children.
414

415
    @rtype: objects.BlockDevStatus
416

417
    """
418
    status = self.GetSyncStatus()
419

    
420
    min_percent = status.sync_percent
421
    max_time = status.estimated_time
422
    is_degraded = status.is_degraded
423
    ldisk_status = status.ldisk_status
424

    
425
    if self._children:
426
      for child in self._children:
427
        child_status = child.GetSyncStatus()
428

    
429
        if min_percent is None:
430
          min_percent = child_status.sync_percent
431
        elif child_status.sync_percent is not None:
432
          min_percent = min(min_percent, child_status.sync_percent)
433

    
434
        if max_time is None:
435
          max_time = child_status.estimated_time
436
        elif child_status.estimated_time is not None:
437
          max_time = max(max_time, child_status.estimated_time)
438

    
439
        is_degraded = is_degraded or child_status.is_degraded
440

    
441
        if ldisk_status is None:
442
          ldisk_status = child_status.ldisk_status
443
        elif child_status.ldisk_status is not None:
444
          ldisk_status = max(ldisk_status, child_status.ldisk_status)
445

    
446
    return objects.BlockDevStatus(dev_path=self.dev_path,
447
                                  major=self.major,
448
                                  minor=self.minor,
449
                                  sync_percent=min_percent,
450
                                  estimated_time=max_time,
451
                                  is_degraded=is_degraded,
452
                                  ldisk_status=ldisk_status)
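
  # Illustrative arithmetic: if one child reports sync_percent 80.0 with
  # estimated_time 120 and another reports 45.0 with 300, the combined status
  # carries 45.0 and 300, i.e. the worst case in both dimensions.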
453

    
454
  def SetInfo(self, text):
455
    """Update metadata with info text.
456

457
    Only supported for some device types.
458

459
    """
460
    for child in self._children:
461
      child.SetInfo(text)
462

    
463
  def Grow(self, amount, dryrun, backingstore):
464
    """Grow the block device.
465

466
    @type amount: integer
467
    @param amount: the amount (in mebibytes) to grow with
468
    @type dryrun: boolean
469
    @param dryrun: whether to execute the operation in simulation mode
470
        only, without actually increasing the size
471
    @param backingstore: whether to execute the operation on backing storage
472
        only, or on "logical" storage only; e.g. DRBD is logical storage,
473
        whereas LVM, file, RBD are backing storage
474

475
    """
476
    raise NotImplementedError
477

    
478
  def GetActualSize(self):
479
    """Return the actual disk size.
480

481
    @note: the device needs to be active when this is called
482

483
    """
484
    assert self.attached, "BlockDevice not attached in GetActualSize()"
485
    result = utils.RunCmd(["blockdev", "--getsize64", self.dev_path])
486
    if result.failed:
487
      _ThrowError("blockdev failed (%s): %s",
488
                  result.fail_reason, result.output)
489
    try:
490
      sz = int(result.output.strip())
491
    except (ValueError, TypeError), err:
492
      _ThrowError("Failed to parse blockdev output: %s", str(err))
493
    return sz
494

    
495
  def __repr__(self):
496
    return ("<%s: unique_id: %s, children: %s, %s:%s, %s>" %
497
            (self.__class__, self.unique_id, self._children,
498
             self.major, self.minor, self.dev_path))
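
# Illustrative sketch of the lifecycle described in the BlockDev docstring,
# using the LogicalVolume type defined below ("xenvg"/"inst1.disk0" and
# params are placeholder values):
#   dev = LogicalVolume.Create(("xenvg", "inst1.disk0"), [], 1024, params)
#   dev.Assemble()    # activate the device (lvchange -ay for LVs)
#   dev.Open()        # prepare for I/O (a no-op for LVs, primary for DRBD)
#   dev.Shutdown()    # undo Assemble(); children stay assembled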
499

    
500

    
501
class LogicalVolume(BlockDev):
502
  """Logical Volume block device.
503

504
  """
505
  _VALID_NAME_RE = re.compile("^[a-zA-Z0-9+_.-]*$")
506
  _INVALID_NAMES = compat.UniqueFrozenset([".", "..", "snapshot", "pvmove"])
507
  _INVALID_SUBSTRINGS = compat.UniqueFrozenset(["_mlog", "_mimage"])
508

    
509
  def __init__(self, unique_id, children, size, params):
510
    """Attaches to a LV device.
511

512
    The unique_id is a tuple (vg_name, lv_name)
513

514
    """
515
    super(LogicalVolume, self).__init__(unique_id, children, size, params)
516
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
517
      raise ValueError("Invalid configuration data %s" % str(unique_id))
518
    self._vg_name, self._lv_name = unique_id
519
    self._ValidateName(self._vg_name)
520
    self._ValidateName(self._lv_name)
521
    self.dev_path = utils.PathJoin("/dev", self._vg_name, self._lv_name)
522
    self._degraded = True
523
    self.major = self.minor = self.pe_size = self.stripe_count = None
524
    self.Attach()
525

    
526
  @classmethod
527
  def Create(cls, unique_id, children, size, params):
528
    """Create a new logical volume.
529

530
    """
531
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
532
      raise errors.ProgrammerError("Invalid configuration data %s" %
533
                                   str(unique_id))
534
    vg_name, lv_name = unique_id
535
    cls._ValidateName(vg_name)
536
    cls._ValidateName(lv_name)
537
    pvs_info = cls.GetPVInfo([vg_name])
538
    if not pvs_info:
539
      _ThrowError("Can't compute PV info for vg %s", vg_name)
540
    pvs_info.sort()
541
    pvs_info.reverse()
542

    
543
    pvlist = [pv[1] for pv in pvs_info]
544
    if compat.any(":" in v for v in pvlist):
545
      _ThrowError("Some of your PVs have the invalid character ':' in their"
546
                  " name, this is not supported - please filter them out"
547
                  " in lvm.conf using either 'filter' or 'preferred_names'")
548
    free_size = sum([pv[0] for pv in pvs_info])
549
    current_pvs = len(pvlist)
550
    desired_stripes = params[constants.LDP_STRIPES]
551
    stripes = min(current_pvs, desired_stripes)
552
    if stripes < desired_stripes:
553
      logging.warning("Could not use %d stripes for VG %s, as only %d PVs are"
554
                      " available.", desired_stripes, vg_name, current_pvs)
555

    
556
    # The size constraint should have been checked from the master before
557
    # calling the create function.
558
    if free_size < size:
559
      _ThrowError("Not enough free space: required %s,"
560
                  " available %s", size, free_size)
561
    cmd = ["lvcreate", "-L%dm" % size, "-n%s" % lv_name]
562
    # If the free space is not well distributed, we won't be able to
563
    # create an optimally-striped volume; in that case, we want to try
564
    # with N, N-1, ..., 2, and finally 1 (non-striped) number of
565
    # stripes
566
    for stripes_arg in range(stripes, 0, -1):
567
      result = utils.RunCmd(cmd + ["-i%d" % stripes_arg] + [vg_name] + pvlist)
568
      if not result.failed:
569
        break
570
    if result.failed:
571
      _ThrowError("LV create failed (%s): %s",
572
                  result.fail_reason, result.output)
573
    return LogicalVolume(unique_id, children, size, params)
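
  # Illustrative sketch (assumed values): for unique_id ("xenvg",
  # "inst1.disk0"), size 10240 MiB and three usable PVs, the first command
  # tried is roughly
  #   lvcreate -L10240m -ninst1.disk0 -i3 xenvg /dev/sda1 /dev/sdb1 /dev/sdc1
  # falling back to -i2 and finally -i1 if the striped allocation fails.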
574

    
575
  @staticmethod
576
  def _GetVolumeInfo(lvm_cmd, fields):
577
    """Returns LVM Volumen infos using lvm_cmd
578

579
    @param lvm_cmd: Should be one of "pvs", "vgs" or "lvs"
580
    @param fields: Fields to return
581
    @return: A list of dicts each with the parsed fields
582

583
    """
584
    if not fields:
585
      raise errors.ProgrammerError("No fields specified")
586

    
587
    sep = "|"
588
    cmd = [lvm_cmd, "--noheadings", "--nosuffix", "--units=m", "--unbuffered",
589
           "--separator=%s" % sep, "-o%s" % ",".join(fields)]
590

    
591
    result = utils.RunCmd(cmd)
592
    if result.failed:
593
      raise errors.CommandError("Can't get the volume information: %s - %s" %
594
                                (result.fail_reason, result.output))
595

    
596
    data = []
597
    for line in result.stdout.splitlines():
598
      splitted_fields = line.strip().split(sep)
599

    
600
      if len(fields) != len(splitted_fields):
601
        raise errors.CommandError("Can't parse %s output: line '%s'" %
602
                                  (lvm_cmd, line))
603

    
604
      data.append(splitted_fields)
605

    
606
    return data
607

    
608
  @classmethod
609
  def GetPVInfo(cls, vg_names, filter_allocatable=True):
610
    """Get the free space info for PVs in a volume group.
611

612
    @param vg_names: list of volume group names, if empty all will be returned
613
    @param filter_allocatable: whether to skip over unallocatable PVs
614

615
    @rtype: list
616
    @return: list of tuples (free_space, name) with free_space in mebibytes
617

618
    """
619
    try:
620
      info = cls._GetVolumeInfo("pvs", ["pv_name", "vg_name", "pv_free",
621
                                        "pv_attr"])
622
    except errors.GenericError, err:
623
      logging.error("Can't get PV information: %s", err)
624
      return None
625

    
626
    data = []
627
    for pv_name, vg_name, pv_free, pv_attr in info:
628
      # (possibly) skip over pvs which are not allocatable
629
      if filter_allocatable and pv_attr[0] != "a":
630
        continue
631
      # (possibly) skip over pvs which are not in the right volume group(s)
632
      if vg_names and vg_name not in vg_names:
633
        continue
634
      data.append((float(pv_free), pv_name, vg_name))
635

    
636
    return data
637

    
638
  @classmethod
639
  def GetVGInfo(cls, vg_names, filter_readonly=True):
640
    """Get the free space info for specific VGs.
641

642
    @param vg_names: list of volume group names, if empty all will be returned
643
    @param filter_readonly: whether to skip over readonly VGs
644

645
    @rtype: list
646
    @return: list of tuples (free_space, total_size, name) with free_space in
647
             MiB
648

649
    """
650
    try:
651
      info = cls._GetVolumeInfo("vgs", ["vg_name", "vg_free", "vg_attr",
652
                                        "vg_size"])
653
    except errors.GenericError, err:
654
      logging.error("Can't get VG information: %s", err)
655
      return None
656

    
657
    data = []
658
    for vg_name, vg_free, vg_attr, vg_size in info:
659
      # (possibly) skip over vgs which are not writable
660
      if filter_readonly and vg_attr[0] == "r":
661
        continue
662
      # (possibly) skip over vgs which are not in the right volume group(s)
663
      if vg_names and vg_name not in vg_names:
664
        continue
665
      data.append((float(vg_free), float(vg_size), vg_name))
666

    
667
    return data
668

    
669
  @classmethod
670
  def _ValidateName(cls, name):
671
    """Validates that a given name is valid as VG or LV name.
672

673
    The list of valid characters and restricted names is taken out of
674
    the lvm(8) manpage, with the simplification that we enforce both
675
    VG and LV restrictions on the names.
676

677
    """
678
    if (not cls._VALID_NAME_RE.match(name) or
679
        name in cls._INVALID_NAMES or
680
        compat.any(substring in name for substring in cls._INVALID_SUBSTRINGS)):
681
      _ThrowError("Invalid LVM name '%s'", name)
682

    
683
  def Remove(self):
684
    """Remove this logical volume.
685

686
    """
687
    if not self.minor and not self.Attach():
688
      # the LV does not exist
689
      return
690
    result = utils.RunCmd(["lvremove", "-f", "%s/%s" %
691
                           (self._vg_name, self._lv_name)])
692
    if result.failed:
693
      _ThrowError("Can't lvremove: %s - %s", result.fail_reason, result.output)
694

    
695
  def Rename(self, new_id):
696
    """Rename this logical volume.
697

698
    """
699
    if not isinstance(new_id, (tuple, list)) or len(new_id) != 2:
700
      raise errors.ProgrammerError("Invalid new logical id '%s'" % new_id)
701
    new_vg, new_name = new_id
702
    if new_vg != self._vg_name:
703
      raise errors.ProgrammerError("Can't move a logical volume across"
704
                                   " volume groups (from %s to to %s)" %
705
                                   (self._vg_name, new_vg))
706
    result = utils.RunCmd(["lvrename", new_vg, self._lv_name, new_name])
707
    if result.failed:
708
      _ThrowError("Failed to rename the logical volume: %s", result.output)
709
    self._lv_name = new_name
710
    self.dev_path = utils.PathJoin("/dev", self._vg_name, self._lv_name)
711

    
712
  def Attach(self):
713
    """Attach to an existing LV.
714

715
    This method will try to see if an existing and active LV exists
716
    which matches our name. If so, its major/minor will be
717
    recorded.
718

719
    """
720
    self.attached = False
721
    result = utils.RunCmd(["lvs", "--noheadings", "--separator=,",
722
                           "--units=m", "--nosuffix",
723
                           "-olv_attr,lv_kernel_major,lv_kernel_minor,"
724
                           "vg_extent_size,stripes", self.dev_path])
725
    if result.failed:
726
      logging.error("Can't find LV %s: %s, %s",
727
                    self.dev_path, result.fail_reason, result.output)
728
      return False
729
    # the output can (and will) have multiple lines for multi-segment
730
    # LVs, as the 'stripes' parameter is a segment one, so we take
731
    # only the last entry, which is the one we're interested in; note
732
    # that with LVM2 anyway the 'stripes' value must be constant
733
    # across segments, so this is a no-op actually
734
    out = result.stdout.splitlines()
735
    if not out: # totally empty result? splitlines() returns at least
736
                # one line for any non-empty string
737
      logging.error("Can't parse LVS output, no lines? Got '%s'", str(out))
738
      return False
739
    out = out[-1].strip().rstrip(",")
740
    out = out.split(",")
741
    if len(out) != 5:
742
      logging.error("Can't parse LVS output, len(%s) != 5", str(out))
743
      return False
744

    
745
    status, major, minor, pe_size, stripes = out
746
    if len(status) < 6:
747
      logging.error("lvs lv_attr is not at least 6 characters (%s)", status)
748
      return False
749

    
750
    try:
751
      major = int(major)
752
      minor = int(minor)
753
    except (TypeError, ValueError), err:
754
      logging.error("lvs major/minor cannot be parsed: %s", str(err))
755

    
756
    try:
757
      pe_size = int(float(pe_size))
758
    except (TypeError, ValueError), err:
759
      logging.error("Can't parse vg extent size: %s", err)
760
      return False
761

    
762
    try:
763
      stripes = int(stripes)
764
    except (TypeError, ValueError), err:
765
      logging.error("Can't parse the number of stripes: %s", err)
766
      return False
767

    
768
    self.major = major
769
    self.minor = minor
770
    self.pe_size = pe_size
771
    self.stripe_count = stripes
772
    self._degraded = status[0] == "v" # virtual volume, i.e. doesn't backing
773
                                      # storage
774
    self.attached = True
775
    return True
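
  # Illustrative sketch (made-up output): the last lvs line parsed above looks
  # roughly like "-wi-ao,252,7,4.00,1", which yields lv_attr "-wi-ao",
  # major 252, minor 7, pe_size 4 and stripe_count 1.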
776

    
777
  def Assemble(self):
778
    """Assemble the device.
779

780
    We always run `lvchange -ay` on the LV to ensure it's active before
781
    use, as there were cases when xenvg was not active after boot
782
    (also possibly after disk issues).
783

784
    """
785
    result = utils.RunCmd(["lvchange", "-ay", self.dev_path])
786
    if result.failed:
787
      _ThrowError("Can't activate lv %s: %s", self.dev_path, result.output)
788

    
789
  def Shutdown(self):
790
    """Shutdown the device.
791

792
    This is a no-op for the LV device type, as we don't deactivate the
793
    volumes on shutdown.
794

795
    """
796
    pass
797

    
798
  def GetSyncStatus(self):
799
    """Returns the sync status of the device.
800

801
    If this device is a mirroring device, this function returns the
802
    status of the mirror.
803

804
    For logical volumes, sync_percent and estimated_time are always
805
    None (no recovery in progress, as we don't handle the mirrored LV
806
    case). The is_degraded parameter is the inverse of the ldisk
807
    parameter.
808

809
    For the ldisk parameter, we check if the logical volume has the
810
    'virtual' type, which means it's not backed by existing storage
811
    anymore (reads from it return I/O errors). This happens after a
812
    physical disk failure and subsequent 'vgreduce --removemissing' on
813
    the volume group.
814

815
    The status was already read in Attach, so we just return it.
816

817
    @rtype: objects.BlockDevStatus
818

819
    """
820
    if self._degraded:
821
      ldisk_status = constants.LDS_FAULTY
822
    else:
823
      ldisk_status = constants.LDS_OKAY
824

    
825
    return objects.BlockDevStatus(dev_path=self.dev_path,
826
                                  major=self.major,
827
                                  minor=self.minor,
828
                                  sync_percent=None,
829
                                  estimated_time=None,
830
                                  is_degraded=self._degraded,
831
                                  ldisk_status=ldisk_status)
832

    
833
  def Open(self, force=False):
834
    """Make the device ready for I/O.
835

836
    This is a no-op for the LV device type.
837

838
    """
839
    pass
840

    
841
  def Close(self):
842
    """Notifies that the device will no longer be used for I/O.
843

844
    This is a no-op for the LV device type.
845

846
    """
847
    pass
848

    
849
  def Snapshot(self, size):
850
    """Create a snapshot copy of an lvm block device.
851

852
    @returns: tuple (vg, lv)
853

854
    """
855
    snap_name = self._lv_name + ".snap"
856

    
857
    # remove existing snapshot if found
858
    snap = LogicalVolume((self._vg_name, snap_name), None, size, self.params)
859
    _IgnoreError(snap.Remove)
860

    
861
    vg_info = self.GetVGInfo([self._vg_name])
862
    if not vg_info:
863
      _ThrowError("Can't compute VG info for vg %s", self._vg_name)
864
    free_size, _, _ = vg_info[0]
865
    if free_size < size:
866
      _ThrowError("Not enough free space: required %s,"
867
                  " available %s", size, free_size)
868

    
869
    _CheckResult(utils.RunCmd(["lvcreate", "-L%dm" % size, "-s",
870
                               "-n%s" % snap_name, self.dev_path]))
871

    
872
    return (self._vg_name, snap_name)
873

    
874
  def _RemoveOldInfo(self):
875
    """Try to remove old tags from the lv.
876

877
    """
878
    result = utils.RunCmd(["lvs", "-o", "tags", "--noheadings", "--nosuffix",
879
                           self.dev_path])
880
    _CheckResult(result)
881

    
882
    raw_tags = result.stdout.strip()
883
    if raw_tags:
884
      for tag in raw_tags.split(","):
885
        _CheckResult(utils.RunCmd(["lvchange", "--deltag",
886
                                   tag.strip(), self.dev_path]))
887

    
888
  def SetInfo(self, text):
889
    """Update metadata with info text.
890

891
    """
892
    BlockDev.SetInfo(self, text)
893

    
894
    self._RemoveOldInfo()
895

    
896
    # Replace invalid characters
897
    text = re.sub("^[^A-Za-z0-9_+.]", "_", text)
898
    text = re.sub("[^-A-Za-z0-9_+.]", "_", text)
899

    
900
    # Only up to 128 characters are allowed
901
    text = text[:128]
902

    
903
    _CheckResult(utils.RunCmd(["lvchange", "--addtag", text, self.dev_path]))
904

    
905
  def Grow(self, amount, dryrun, backingstore):
906
    """Grow the logical volume.
907

908
    """
909
    if not backingstore:
910
      return
911
    if self.pe_size is None or self.stripe_count is None:
912
      if not self.Attach():
913
        _ThrowError("Can't attach to LV during Grow()")
914
    full_stripe_size = self.pe_size * self.stripe_count
915
    rest = amount % full_stripe_size
916
    if rest != 0:
917
      amount += full_stripe_size - rest
918
    cmd = ["lvextend", "-L", "+%dm" % amount]
919
    if dryrun:
920
      cmd.append("--test")
921
    # we try multiple algorithms since the 'best' ones might not have
922
    # space available in the right place, but later ones might (since
923
    # they have less constraints); also note that only recent LVM
924
    # supports 'cling'
925
    for alloc_policy in "contiguous", "cling", "normal":
926
      result = utils.RunCmd(cmd + ["--alloc", alloc_policy, self.dev_path])
927
      if not result.failed:
928
        return
929
    _ThrowError("Can't grow LV %s: %s", self.dev_path, result.output)
930

    
931

    
932
class DRBD8Status(object):
933
  """A DRBD status representation class.
934

935
  Note that this doesn't support unconfigured devices (cs:Unconfigured).
936

937
  """
938
  UNCONF_RE = re.compile(r"\s*[0-9]+:\s*cs:Unconfigured$")
939
  LINE_RE = re.compile(r"\s*[0-9]+:\s*cs:(\S+)\s+(?:st|ro):([^/]+)/(\S+)"
940
                       "\s+ds:([^/]+)/(\S+)\s+.*$")
941
  SYNC_RE = re.compile(r"^.*\ssync'ed:\s*([0-9.]+)%.*"
942
                       # Due to a bug in drbd in the kernel, introduced in
943
                       # commit 4b0715f096 (still unfixed as of 2011-08-22)
944
                       "(?:\s|M)"
945
                       "finish: ([0-9]+):([0-9]+):([0-9]+)\s.*$")
946

    
947
  CS_UNCONFIGURED = "Unconfigured"
948
  CS_STANDALONE = "StandAlone"
949
  CS_WFCONNECTION = "WFConnection"
950
  CS_WFREPORTPARAMS = "WFReportParams"
951
  CS_CONNECTED = "Connected"
952
  CS_STARTINGSYNCS = "StartingSyncS"
953
  CS_STARTINGSYNCT = "StartingSyncT"
954
  CS_WFBITMAPS = "WFBitMapS"
955
  CS_WFBITMAPT = "WFBitMapT"
956
  CS_WFSYNCUUID = "WFSyncUUID"
957
  CS_SYNCSOURCE = "SyncSource"
958
  CS_SYNCTARGET = "SyncTarget"
959
  CS_PAUSEDSYNCS = "PausedSyncS"
960
  CS_PAUSEDSYNCT = "PausedSyncT"
961
  CSET_SYNC = compat.UniqueFrozenset([
962
    CS_WFREPORTPARAMS,
963
    CS_STARTINGSYNCS,
964
    CS_STARTINGSYNCT,
965
    CS_WFBITMAPS,
966
    CS_WFBITMAPT,
967
    CS_WFSYNCUUID,
968
    CS_SYNCSOURCE,
969
    CS_SYNCTARGET,
970
    CS_PAUSEDSYNCS,
971
    CS_PAUSEDSYNCT,
972
    ])
973

    
974
  DS_DISKLESS = "Diskless"
975
  DS_ATTACHING = "Attaching" # transient state
976
  DS_FAILED = "Failed" # transient state, next: diskless
977
  DS_NEGOTIATING = "Negotiating" # transient state
978
  DS_INCONSISTENT = "Inconsistent" # while syncing or after creation
979
  DS_OUTDATED = "Outdated"
980
  DS_DUNKNOWN = "DUnknown" # shown for peer disk when not connected
981
  DS_CONSISTENT = "Consistent"
982
  DS_UPTODATE = "UpToDate" # normal state
983

    
984
  RO_PRIMARY = "Primary"
985
  RO_SECONDARY = "Secondary"
986
  RO_UNKNOWN = "Unknown"
987

    
988
  def __init__(self, procline):
989
    u = self.UNCONF_RE.match(procline)
990
    if u:
991
      self.cstatus = self.CS_UNCONFIGURED
992
      self.lrole = self.rrole = self.ldisk = self.rdisk = None
993
    else:
994
      m = self.LINE_RE.match(procline)
995
      if not m:
996
        raise errors.BlockDeviceError("Can't parse input data '%s'" % procline)
997
      self.cstatus = m.group(1)
998
      self.lrole = m.group(2)
999
      self.rrole = m.group(3)
1000
      self.ldisk = m.group(4)
1001
      self.rdisk = m.group(5)
1002

    
1003
    # end reading of data from the LINE_RE or UNCONF_RE
1004

    
1005
    self.is_standalone = self.cstatus == self.CS_STANDALONE
1006
    self.is_wfconn = self.cstatus == self.CS_WFCONNECTION
1007
    self.is_connected = self.cstatus == self.CS_CONNECTED
1008
    self.is_primary = self.lrole == self.RO_PRIMARY
1009
    self.is_secondary = self.lrole == self.RO_SECONDARY
1010
    self.peer_primary = self.rrole == self.RO_PRIMARY
1011
    self.peer_secondary = self.rrole == self.RO_SECONDARY
1012
    self.both_primary = self.is_primary and self.peer_primary
1013
    self.both_secondary = self.is_secondary and self.peer_secondary
1014

    
1015
    self.is_diskless = self.ldisk == self.DS_DISKLESS
1016
    self.is_disk_uptodate = self.ldisk == self.DS_UPTODATE
1017

    
1018
    self.is_in_resync = self.cstatus in self.CSET_SYNC
1019
    self.is_in_use = self.cstatus != self.CS_UNCONFIGURED
1020

    
1021
    m = self.SYNC_RE.match(procline)
1022
    if m:
1023
      self.sync_percent = float(m.group(1))
1024
      hours = int(m.group(2))
1025
      minutes = int(m.group(3))
1026
      seconds = int(m.group(4))
1027
      self.est_time = hours * 3600 + minutes * 60 + seconds
1028
    else:
1029
      # we have (in this if branch) no percent information, but if
1030
      # we're resyncing we need to 'fake' a sync percent information,
1031
      # as this is how cmdlib determines if it makes sense to wait for
1032
      # resyncing or not
1033
      if self.is_in_resync:
1034
        self.sync_percent = 0
1035
      else:
1036
        self.sync_percent = None
1037
      self.est_time = None
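
  # Illustrative sketch (abbreviated, not verbatim /proc/drbd output): a
  # joined status line such as
  #   " 0: cs:SyncTarget ro:Secondary/Primary ds:Inconsistent/UpToDate C r----
  #     ... sync'ed: 42.3% ... finish: 0:10:05 ..."
  # would give is_in_resync == True, sync_percent == 42.3 and est_time == 605
  # (0h 10m 5s), assuming the sync'ed/finish part matches SYNC_RE as above.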
1038

    
1039

    
1040
class BaseDRBD(BlockDev): # pylint: disable=W0223
1041
  """Base DRBD class.
1042

1043
  This class contains a few bits of common functionality between the
1044
  0.7 and 8.x versions of DRBD.
1045

1046
  """
1047
  _VERSION_RE = re.compile(r"^version: (\d+)\.(\d+)\.(\d+)(?:\.\d+)?"
1048
                           r" \(api:(\d+)/proto:(\d+)(?:-(\d+))?\)")
1049
  _VALID_LINE_RE = re.compile("^ *([0-9]+): cs:([^ ]+).*$")
1050
  _UNUSED_LINE_RE = re.compile("^ *([0-9]+): cs:Unconfigured$")
1051

    
1052
  _DRBD_MAJOR = 147
1053
  _ST_UNCONFIGURED = "Unconfigured"
1054
  _ST_WFCONNECTION = "WFConnection"
1055
  _ST_CONNECTED = "Connected"
1056

    
1057
  _STATUS_FILE = constants.DRBD_STATUS_FILE
1058
  _USERMODE_HELPER_FILE = "/sys/module/drbd/parameters/usermode_helper"
1059

    
1060
  @staticmethod
1061
  def _GetProcData(filename=_STATUS_FILE):
1062
    """Return data from /proc/drbd.
1063

1064
    """
1065
    try:
1066
      data = utils.ReadFile(filename).splitlines()
1067
    except EnvironmentError, err:
1068
      if err.errno == errno.ENOENT:
1069
        _ThrowError("The file %s cannot be opened, check if the module"
1070
                    " is loaded (%s)", filename, str(err))
1071
      else:
1072
        _ThrowError("Can't read the DRBD proc file %s: %s", filename, str(err))
1073
    if not data:
1074
      _ThrowError("Can't read any data from %s", filename)
1075
    return data
1076

    
1077
  @classmethod
1078
  def _MassageProcData(cls, data):
1079
    """Transform the output of _GetProdData into a nicer form.
1080

1081
    @return: a dictionary of minor: joined lines from /proc/drbd
1082
        for that minor
1083

1084
    """
1085
    results = {}
1086
    old_minor = old_line = None
1087
    for line in data:
1088
      if not line: # completely empty lines, as can be returned by drbd8.0+
1089
        continue
1090
      lresult = cls._VALID_LINE_RE.match(line)
1091
      if lresult is not None:
1092
        if old_minor is not None:
1093
          results[old_minor] = old_line
1094
        old_minor = int(lresult.group(1))
1095
        old_line = line
1096
      else:
1097
        if old_minor is not None:
1098
          old_line += " " + line.strip()
1099
    # add last line
1100
    if old_minor is not None:
1101
      results[old_minor] = old_line
1102
    return results
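
  # Illustrative sketch (made-up input): given the /proc/drbd lines
  #   " 0: cs:Connected ro:Primary/Secondary ds:UpToDate/UpToDate C r----"
  #   "    ns:0 nr:0 dw:0 dr:0 al:0 bm:0 lo:0 pe:0 ua:0 ap:0 ep:1 wo:b oos:0"
  # the result maps minor 0 to both lines joined with a space, in the form
  # expected by DRBD8Status.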
1103

    
1104
  @classmethod
1105
  def _GetVersion(cls, proc_data):
1106
    """Return the DRBD version.
1107

1108
    This will return a dict with keys:
1109
      - k_major
1110
      - k_minor
1111
      - k_point
1112
      - api
1113
      - proto
1114
      - proto2 (only on drbd > 8.2.X)
1115

1116
    """
1117
    first_line = proc_data[0].strip()
1118
    version = cls._VERSION_RE.match(first_line)
1119
    if not version:
1120
      raise errors.BlockDeviceError("Can't parse DRBD version from '%s'" %
1121
                                    first_line)
1122

    
1123
    values = version.groups()
1124
    retval = {
1125
      "k_major": int(values[0]),
1126
      "k_minor": int(values[1]),
1127
      "k_point": int(values[2]),
1128
      "api": int(values[3]),
1129
      "proto": int(values[4]),
1130
      }
1131
    if values[5] is not None:
1132
      retval["proto2"] = values[5]
1133

    
1134
    return retval
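
  # Illustrative sketch: a first /proc/drbd line of
  #   "version: 8.3.11 (api:88/proto:86-96)"
  # parses to {"k_major": 8, "k_minor": 3, "k_point": 11, "api": 88,
  #            "proto": 86, "proto2": "96"}.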
1135

    
1136
  @staticmethod
1137
  def GetUsermodeHelper(filename=_USERMODE_HELPER_FILE):
1138
    """Returns DRBD usermode_helper currently set.
1139

1140
    """
1141
    try:
1142
      helper = utils.ReadFile(filename).splitlines()[0]
1143
    except EnvironmentError, err:
1144
      if err.errno == errno.ENOENT:
1145
        _ThrowError("The file %s cannot be opened, check if the module"
1146
                    " is loaded (%s)", filename, str(err))
1147
      else:
1148
        _ThrowError("Can't read DRBD helper file %s: %s", filename, str(err))
1149
    if not helper:
1150
      _ThrowError("Can't read any data from %s", filename)
1151
    return helper
1152

    
1153
  @staticmethod
1154
  def _DevPath(minor):
1155
    """Return the path to a drbd device for a given minor.
1156

1157
    """
1158
    return "/dev/drbd%d" % minor
1159

    
1160
  @classmethod
1161
  def GetUsedDevs(cls):
1162
    """Compute the list of used DRBD devices.
1163

1164
    """
1165
    data = cls._GetProcData()
1166

    
1167
    used_devs = {}
1168
    for line in data:
1169
      match = cls._VALID_LINE_RE.match(line)
1170
      if not match:
1171
        continue
1172
      minor = int(match.group(1))
1173
      state = match.group(2)
1174
      if state == cls._ST_UNCONFIGURED:
1175
        continue
1176
      used_devs[minor] = state, line
1177

    
1178
    return used_devs
1179

    
1180
  def _SetFromMinor(self, minor):
1181
    """Set our parameters based on the given minor.
1182

1183
    This sets our minor variable and our dev_path.
1184

1185
    """
1186
    if minor is None:
1187
      self.minor = self.dev_path = None
1188
      self.attached = False
1189
    else:
1190
      self.minor = minor
1191
      self.dev_path = self._DevPath(minor)
1192
      self.attached = True
1193

    
1194
  @staticmethod
1195
  def _CheckMetaSize(meta_device):
1196
    """Check if the given meta device looks like a valid one.
1197

1198
    This currently only checks the size, which must be around
1199
    128MiB.
1200

1201
    """
1202
    result = utils.RunCmd(["blockdev", "--getsize", meta_device])
1203
    if result.failed:
1204
      _ThrowError("Failed to get device size: %s - %s",
1205
                  result.fail_reason, result.output)
1206
    try:
1207
      sectors = int(result.stdout)
1208
    except (TypeError, ValueError):
1209
      _ThrowError("Invalid output from blockdev: '%s'", result.stdout)
1210
    num_bytes = sectors * 512
1211
    if num_bytes < 128 * 1024 * 1024: # less than 128MiB
1212
      _ThrowError("Meta device too small (%.2fMib)", (num_bytes / 1024 / 1024))
1213
    # the maximum *valid* size of the meta device when living on top
1214
    # of LVM is hard to compute: it depends on the number of stripes
1215
    # and the PE size; e.g. a 2-stripe, 64MB PE will result in a 128MB
1216
    # (normal size), but an eight-stripe 128MB PE will result in a 1GB
1217
    # size meta device; as such, we restrict it to 1GB (a little bit
1218
    # too generous, but making assumptions about PE size is hard)
1219
    if num_bytes > 1024 * 1024 * 1024:
1220
      _ThrowError("Meta device too big (%.2fMiB)", (num_bytes / 1024 / 1024))
1221

    
1222
  def Rename(self, new_id):
1223
    """Rename a device.
1224

1225
    This is not supported for drbd devices.
1226

1227
    """
1228
    raise errors.ProgrammerError("Can't rename a drbd device")
1229

    
1230

    
1231
class DRBD8(BaseDRBD):
1232
  """DRBD v8.x block device.
1233

1234
  This implements the local host part of the DRBD device, i.e. it
1235
  doesn't do anything to the supposed peer. If you need a fully
1236
  connected DRBD pair, you need to use this class on both hosts.
1237

1238
  The unique_id for the drbd device is a (local_ip, local_port,
1239
  remote_ip, remote_port, local_minor, secret) tuple, and it must have
1240
  two children: the data device and the meta_device. The meta device
1241
  is checked for valid size and is zeroed on create.
1242

1243
  """
1244
  _MAX_MINORS = 255
1245
  _PARSE_SHOW = None
1246

    
1247
  # timeout constants
1248
  _NET_RECONFIG_TIMEOUT = 60
1249

    
1250
  # command line options for barriers
1251
  _DISABLE_DISK_OPTION = "--no-disk-barrier"  # -a
1252
  _DISABLE_DRAIN_OPTION = "--no-disk-drain"   # -D
1253
  _DISABLE_FLUSH_OPTION = "--no-disk-flushes" # -i
1254
  _DISABLE_META_FLUSH_OPTION = "--no-md-flushes"  # -m
1255

    
1256
  def __init__(self, unique_id, children, size, params):
1257
    if children and children.count(None) > 0:
1258
      children = []
1259
    if len(children) not in (0, 2):
1260
      raise ValueError("Invalid configuration data %s" % str(children))
1261
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 6:
1262
      raise ValueError("Invalid configuration data %s" % str(unique_id))
1263
    (self._lhost, self._lport,
1264
     self._rhost, self._rport,
1265
     self._aminor, self._secret) = unique_id
1266
    if children:
1267
      if not _CanReadDevice(children[1].dev_path):
1268
        logging.info("drbd%s: Ignoring unreadable meta device", self._aminor)
1269
        children = []
1270
    super(DRBD8, self).__init__(unique_id, children, size, params)
1271
    self.major = self._DRBD_MAJOR
1272
    version = self._GetVersion(self._GetProcData())
1273
    if version["k_major"] != 8:
1274
      _ThrowError("Mismatch in DRBD kernel version and requested ganeti"
1275
                  " usage: kernel is %s.%s, ganeti wants 8.x",
1276
                  version["k_major"], version["k_minor"])
1277

    
1278
    if (self._lhost is not None and self._lhost == self._rhost and
1279
        self._lport == self._rport):
1280
      raise ValueError("Invalid configuration data, same local/remote %s" %
1281
                       (unique_id,))
1282
    self.Attach()
1283

    
1284
  @classmethod
1285
  def _InitMeta(cls, minor, dev_path):
1286
    """Initialize a meta device.
1287

1288
    This will not work if the given minor is in use.
1289

1290
    """
1291
    # Zero the metadata first, in order to make sure drbdmeta doesn't
1292
    # try to auto-detect existing filesystems or similar (see
1293
    # http://code.google.com/p/ganeti/issues/detail?id=182); we only
1294
    # care about the first 128MB of data in the device, even though it
1295
    # can be bigger
1296
    result = utils.RunCmd([constants.DD_CMD,
1297
                           "if=/dev/zero", "of=%s" % dev_path,
1298
                           "bs=1048576", "count=128", "oflag=direct"])
1299
    if result.failed:
1300
      _ThrowError("Can't wipe the meta device: %s", result.output)
1301

    
1302
    result = utils.RunCmd(["drbdmeta", "--force", cls._DevPath(minor),
1303
                           "v08", dev_path, "0", "create-md"])
1304
    if result.failed:
1305
      _ThrowError("Can't initialize meta device: %s", result.output)
1306

    
1307
  @classmethod
1308
  def _FindUnusedMinor(cls):
1309
    """Find an unused DRBD device.
1310

1311
    This is specific to 8.x as the minors are allocated dynamically,
1312
    so non-existing numbers up to a max minor count are actually free.
1313

1314
    """
1315
    data = cls._GetProcData()
1316

    
1317
    highest = None
1318
    for line in data:
1319
      match = cls._UNUSED_LINE_RE.match(line)
1320
      if match:
1321
        return int(match.group(1))
1322
      match = cls._VALID_LINE_RE.match(line)
1323
      if match:
1324
        minor = int(match.group(1))
1325
        highest = max(highest, minor)
1326
    if highest is None: # there are no minors in use at all
1327
      return 0
1328
    if highest >= cls._MAX_MINORS:
1329
      logging.error("Error: no free drbd minors!")
1330
      raise errors.BlockDeviceError("Can't find a free DRBD minor")
1331
    return highest + 1
1332

    
1333
  @classmethod
1334
  def _GetShowParser(cls):
1335
    """Return a parser for `drbd show` output.
1336

1337
    This will either create or return an already-created parser for the
1338
    output of the command `drbdsetup show`.
1339

1340
    """
1341
    if cls._PARSE_SHOW is not None:
1342
      return cls._PARSE_SHOW
1343

    
1344
    # pyparsing setup
1345
    lbrace = pyp.Literal("{").suppress()
1346
    rbrace = pyp.Literal("}").suppress()
1347
    lbracket = pyp.Literal("[").suppress()
1348
    rbracket = pyp.Literal("]").suppress()
1349
    semi = pyp.Literal(";").suppress()
1350
    colon = pyp.Literal(":").suppress()
1351
    # this also converts the value to an int
1352
    number = pyp.Word(pyp.nums).setParseAction(lambda s, l, t: int(t[0]))
1353

    
1354
    comment = pyp.Literal("#") + pyp.Optional(pyp.restOfLine)
1355
    defa = pyp.Literal("_is_default").suppress()
1356
    dbl_quote = pyp.Literal('"').suppress()
1357

    
1358
    keyword = pyp.Word(pyp.alphanums + "-")
1359

    
1360
    # value types
1361
    value = pyp.Word(pyp.alphanums + "_-/.:")
1362
    quoted = dbl_quote + pyp.CharsNotIn('"') + dbl_quote
1363
    ipv4_addr = (pyp.Optional(pyp.Literal("ipv4")).suppress() +
1364
                 pyp.Word(pyp.nums + ".") + colon + number)
1365
    ipv6_addr = (pyp.Optional(pyp.Literal("ipv6")).suppress() +
1366
                 pyp.Optional(lbracket) + pyp.Word(pyp.hexnums + ":") +
1367
                 pyp.Optional(rbracket) + colon + number)
1368
    # meta device, extended syntax
1369
    meta_value = ((value ^ quoted) + lbracket + number + rbracket)
1370
    # device name, extended syntax
1371
    device_value = pyp.Literal("minor").suppress() + number
1372

    
1373
    # a statement
1374
    stmt = (~rbrace + keyword + ~lbrace +
1375
            pyp.Optional(ipv4_addr ^ ipv6_addr ^ value ^ quoted ^ meta_value ^
1376
                         device_value) +
1377
            pyp.Optional(defa) + semi +
1378
            pyp.Optional(pyp.restOfLine).suppress())
1379

    
1380
    # an entire section
1381
    section_name = pyp.Word(pyp.alphas + "_")
1382
    section = section_name + lbrace + pyp.ZeroOrMore(pyp.Group(stmt)) + rbrace
1383

    
1384
    bnf = pyp.ZeroOrMore(pyp.Group(section ^ stmt))
1385
    bnf.ignore(comment)
1386

    
1387
    cls._PARSE_SHOW = bnf
1388

    
1389
    return bnf
1390

    
1391
  @classmethod
1392
  def _GetShowData(cls, minor):
1393
    """Return the `drbdsetup show` data for a minor.
1394

1395
    """
1396
    result = utils.RunCmd(["drbdsetup", cls._DevPath(minor), "show"])
1397
    if result.failed:
1398
      logging.error("Can't display the drbd config: %s - %s",
1399
                    result.fail_reason, result.output)
1400
      return None
1401
    return result.stdout
1402

    
1403
  @classmethod
1404
  def _GetDevInfo(cls, out):
1405
    """Parse details about a given DRBD minor.
1406

1407
    This returns, if available, the local backing device (as a path)
1408
    and the local and remote (ip, port) information from a string
1409
    containing the output of the `drbdsetup show` command as returned
1410
    by _GetShowData.
1411

1412
    """
1413
    data = {}
1414
    if not out:
1415
      return data
1416

    
1417
    bnf = cls._GetShowParser()
1418
    # run pyparse
1419

    
1420
    try:
1421
      results = bnf.parseString(out)
1422
    except pyp.ParseException, err:
1423
      _ThrowError("Can't parse drbdsetup show output: %s", str(err))
1424

    
1425
    # and massage the results into our desired format
1426
    for section in results:
1427
      sname = section[0]
1428
      if sname == "_this_host":
1429
        for lst in section[1:]:
1430
          if lst[0] == "disk":
1431
            data["local_dev"] = lst[1]
1432
          elif lst[0] == "meta-disk":
1433
            data["meta_dev"] = lst[1]
1434
            data["meta_index"] = lst[2]
1435
          elif lst[0] == "address":
1436
            data["local_addr"] = tuple(lst[1:])
1437
      elif sname == "_remote_host":
1438
        for lst in section[1:]:
1439
          if lst[0] == "address":
1440
            data["remote_addr"] = tuple(lst[1:])
1441
    return data
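
  # Illustrative sketch (hypothetical values): for a fully configured minor,
  # the parsed dict typically looks like
  #   {"local_dev": "/dev/xenvg/inst1.disk0_data",
  #    "meta_dev": "/dev/xenvg/inst1.disk0_meta", "meta_index": 0,
  #    "local_addr": ("192.0.2.1", 11000),
  #    "remote_addr": ("192.0.2.2", 11000)}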
1442

    
1443
  def _MatchesLocal(self, info):
1444
    """Test if our local config matches with an existing device.
1445

1446
    The parameter should be as returned from `_GetDevInfo()`. This
1447
    method tests if our local backing device is the same as the one in
1448
    the info parameter, in effect testing if we look like the given
1449
    device.
1450

1451
    """
1452
    if self._children:
1453
      backend, meta = self._children
1454
    else:
1455
      backend = meta = None
1456

    
1457
    if backend is not None:
1458
      retval = ("local_dev" in info and info["local_dev"] == backend.dev_path)
1459
    else:
1460
      retval = ("local_dev" not in info)
1461

    
1462
    if meta is not None:
1463
      retval = retval and ("meta_dev" in info and
1464
                           info["meta_dev"] == meta.dev_path)
1465
      retval = retval and ("meta_index" in info and
1466
                           info["meta_index"] == 0)
1467
    else:
1468
      retval = retval and ("meta_dev" not in info and
1469
                           "meta_index" not in info)
1470
    return retval
1471

    
1472
  def _MatchesNet(self, info):
1473
    """Test if our network config matches with an existing device.
1474

1475
    The parameter should be as returned from `_GetDevInfo()`. This
1476
    method tests if our network configuration is the same as the one
1477
    in the info parameter, in effect testing if we look like the given
1478
    device.
1479

1480
    """
1481
    if (((self._lhost is None and not ("local_addr" in info)) and
1482
         (self._rhost is None and not ("remote_addr" in info)))):
1483
      return True
1484

    
1485
    if self._lhost is None:
1486
      return False
1487

    
1488
    if not ("local_addr" in info and
1489
            "remote_addr" in info):
1490
      return False
1491

    
1492
    retval = (info["local_addr"] == (self._lhost, self._lport))
1493
    retval = (retval and
1494
              info["remote_addr"] == (self._rhost, self._rport))
1495
    return retval
1496

    
1497
  def _AssembleLocal(self, minor, backend, meta, size):
    """Configure the local part of a DRBD device.

    """
    args = ["drbdsetup", self._DevPath(minor), "disk",
            backend, meta, "0",
            "-e", "detach",
            "--create-device"]
    if size:
      args.extend(["-d", "%sm" % size])

    version = self._GetVersion(self._GetProcData())
    vmaj = version["k_major"]
    vmin = version["k_minor"]
    vrel = version["k_point"]

    barrier_args = \
      self._ComputeDiskBarrierArgs(vmaj, vmin, vrel,
                                   self.params[constants.LDP_BARRIERS],
                                   self.params[constants.LDP_NO_META_FLUSH])
    args.extend(barrier_args)

    if self.params[constants.LDP_DISK_CUSTOM]:
      args.extend(shlex.split(self.params[constants.LDP_DISK_CUSTOM]))

    result = utils.RunCmd(args)
    if result.failed:
      _ThrowError("drbd%d: can't attach local disk: %s", minor, result.output)

  @classmethod
  def _ComputeDiskBarrierArgs(cls, vmaj, vmin, vrel, disabled_barriers,
                              disable_meta_flush):
    """Compute the DRBD command line parameters for disk barriers

    Returns a list of the disk barrier parameters as requested via the
    disabled_barriers and disable_meta_flush arguments, and according to the
    supported ones in the DRBD version vmaj.vmin.vrel

    If the desired option is unsupported, raises errors.BlockDeviceError.

    """
    disabled_barriers_set = frozenset(disabled_barriers)
    if disabled_barriers_set not in constants.DRBD_VALID_BARRIER_OPT:
      raise errors.BlockDeviceError("%s is not a valid option set for DRBD"
                                    " barriers" % disabled_barriers)

    args = []

    # The following code assumes DRBD 8.x, with x < 4 and x != 1 (DRBD 8.1.x
    # does not exist)
    if vmaj != 8 or vmin not in (0, 2, 3):
      raise errors.BlockDeviceError("Unsupported DRBD version: %d.%d.%d" %
                                    (vmaj, vmin, vrel))

    def _AppendOrRaise(option, min_version):
      """Helper for DRBD options"""
      if min_version is not None and vrel >= min_version:
        args.append(option)
      else:
        raise errors.BlockDeviceError("Could not use the option %s as the"
                                      " DRBD version %d.%d.%d does not support"
                                      " it." % (option, vmaj, vmin, vrel))

    # the minimum version for each feature is encoded via pairs of (minor
    # version -> x) where x is version in which support for the option was
    # introduced.
    meta_flush_supported = disk_flush_supported = {
      0: 12,
      2: 7,
      3: 0,
      }

    disk_drain_supported = {
      2: 7,
      3: 0,
      }

    disk_barriers_supported = {
      3: 0,
      }

    # meta flushes
    if disable_meta_flush:
      _AppendOrRaise(cls._DISABLE_META_FLUSH_OPTION,
                     meta_flush_supported.get(vmin, None))

    # disk flushes
    if constants.DRBD_B_DISK_FLUSH in disabled_barriers_set:
      _AppendOrRaise(cls._DISABLE_FLUSH_OPTION,
                     disk_flush_supported.get(vmin, None))

    # disk drain
    if constants.DRBD_B_DISK_DRAIN in disabled_barriers_set:
      _AppendOrRaise(cls._DISABLE_DRAIN_OPTION,
                     disk_drain_supported.get(vmin, None))

    # disk barriers
    if constants.DRBD_B_DISK_BARRIERS in disabled_barriers_set:
      _AppendOrRaise(cls._DISABLE_DISK_OPTION,
                     disk_barriers_supported.get(vmin, None))

    return args

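  # Illustrative sketch (not executed): assuming a node running DRBD
  # 8.3.11, and assuming "bf" is one of the valid entries of
  # constants.DRBD_VALID_BARRIER_OPT, the call made from
  # _AssembleLocal() above would be roughly:
  #
  #   extra = cls._ComputeDiskBarrierArgs(8, 3, 11, "bf", True)
  #
  # and would return the corresponding _DISABLE_* switches (no meta
  # flush, no disk flush, no disk barriers), which are then appended
  # verbatim to the drbdsetup "disk" command line.
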
  def _AssembleNet(self, minor, net_info, protocol,
                   dual_pri=False, hmac=None, secret=None):
    """Configure the network part of the device.

    """
    lhost, lport, rhost, rport = net_info
    if None in net_info:
      # we don't want a network connection and actually want to make
      # sure it is shut down
      self._ShutdownNet(minor)
      return

    # Workaround for a race condition. When DRBD is doing its dance to
    # establish a connection with its peer, it also sends the
    # synchronization speed over the wire. In some cases setting the
    # sync speed only after setting up both sides can race with DRBD
    # connecting, hence we set it here before telling DRBD anything
    # about its peer.
    sync_errors = self._SetMinorSyncParams(minor, self.params)
    if sync_errors:
      _ThrowError("drbd%d: can't set the synchronization parameters: %s" %
                  (minor, utils.CommaJoin(sync_errors)))

    if netutils.IP6Address.IsValid(lhost):
      if not netutils.IP6Address.IsValid(rhost):
        _ThrowError("drbd%d: can't connect ip %s to ip %s" %
                    (minor, lhost, rhost))
      family = "ipv6"
    elif netutils.IP4Address.IsValid(lhost):
      if not netutils.IP4Address.IsValid(rhost):
        _ThrowError("drbd%d: can't connect ip %s to ip %s" %
                    (minor, lhost, rhost))
      family = "ipv4"
    else:
      _ThrowError("drbd%d: Invalid ip %s" % (minor, lhost))

    args = ["drbdsetup", self._DevPath(minor), "net",
            "%s:%s:%s" % (family, lhost, lport),
            "%s:%s:%s" % (family, rhost, rport), protocol,
            "-A", "discard-zero-changes",
            "-B", "consensus",
            "--create-device",
            ]
    if dual_pri:
      args.append("-m")
    if hmac and secret:
      args.extend(["-a", hmac, "-x", secret])

    if self.params[constants.LDP_NET_CUSTOM]:
      args.extend(shlex.split(self.params[constants.LDP_NET_CUSTOM]))

    result = utils.RunCmd(args)
    if result.failed:
      _ThrowError("drbd%d: can't setup network: %s - %s",
                  minor, result.fail_reason, result.output)

    def _CheckNetworkConfig():
      info = self._GetDevInfo(self._GetShowData(minor))
      if "local_addr" not in info or "remote_addr" not in info:
        raise utils.RetryAgain()

      if (info["local_addr"] != (lhost, lport) or
          info["remote_addr"] != (rhost, rport)):
        raise utils.RetryAgain()

    try:
      utils.Retry(_CheckNetworkConfig, 1.0, 10.0)
    except utils.RetryTimeout:
      _ThrowError("drbd%d: timeout while configuring network", minor)

  def AddChildren(self, devices):
    """Add a disk to the DRBD device.

    """
    if self.minor is None:
      _ThrowError("drbd%d: can't attach to drbd8 during AddChildren",
                  self._aminor)
    if len(devices) != 2:
      _ThrowError("drbd%d: need two devices for AddChildren", self.minor)
    info = self._GetDevInfo(self._GetShowData(self.minor))
    if "local_dev" in info:
      _ThrowError("drbd%d: already attached to a local disk", self.minor)
    backend, meta = devices
    if backend.dev_path is None or meta.dev_path is None:
      _ThrowError("drbd%d: children not ready during AddChildren", self.minor)
    backend.Open()
    meta.Open()
    self._CheckMetaSize(meta.dev_path)
    self._InitMeta(self._FindUnusedMinor(), meta.dev_path)

    self._AssembleLocal(self.minor, backend.dev_path, meta.dev_path, self.size)
    self._children = devices

  def RemoveChildren(self, devices):
    """Detach the drbd device from local storage.

    """
    if self.minor is None:
      _ThrowError("drbd%d: can't attach to drbd8 during RemoveChildren",
                  self._aminor)
    # early return if we don't actually have backing storage
    info = self._GetDevInfo(self._GetShowData(self.minor))
    if "local_dev" not in info:
      return
    if len(self._children) != 2:
      _ThrowError("drbd%d: we don't have two children: %s", self.minor,
                  self._children)
    if self._children.count(None) == 2: # we don't actually have children :)
      logging.warning("drbd%d: requested detach while detached", self.minor)
      return
    if len(devices) != 2:
      _ThrowError("drbd%d: we need two children in RemoveChildren", self.minor)
    for child, dev in zip(self._children, devices):
      if dev != child.dev_path:
        _ThrowError("drbd%d: mismatch in local storage (%s != %s) in"
                    " RemoveChildren", self.minor, dev, child.dev_path)

    self._ShutdownLocal(self.minor)
    self._children = []

  @classmethod
  def _SetMinorSyncParams(cls, minor, params):
    """Set the parameters of the DRBD syncer.

    This is the low-level implementation.

    @type minor: int
    @param minor: the drbd minor whose settings we change
    @type params: dict
    @param params: LD level disk parameters related to the synchronization
    @rtype: list
    @return: a list of error messages

    """

    args = ["drbdsetup", cls._DevPath(minor), "syncer"]
    if params[constants.LDP_DYNAMIC_RESYNC]:
      version = cls._GetVersion(cls._GetProcData())
      vmin = version["k_minor"]
      vrel = version["k_point"]

      # By definition we are using 8.x, so just check the rest of the version
      # number
      if vmin != 3 or vrel < 9:
        msg = ("The current DRBD version (8.%d.%d) does not support the "
               "dynamic resync speed controller" % (vmin, vrel))
        logging.error(msg)
        return [msg]

      if params[constants.LDP_PLAN_AHEAD] == 0:
        msg = ("A value of 0 for c-plan-ahead disables the dynamic sync speed"
               " controller at DRBD level. If you want to disable it, please"
               " set the dynamic-resync disk parameter to False.")
        logging.error(msg)
        return [msg]

      # add the c-* parameters to args
      args.extend(["--c-plan-ahead", params[constants.LDP_PLAN_AHEAD],
                   "--c-fill-target", params[constants.LDP_FILL_TARGET],
                   "--c-delay-target", params[constants.LDP_DELAY_TARGET],
                   "--c-max-rate", params[constants.LDP_MAX_RATE],
                   "--c-min-rate", params[constants.LDP_MIN_RATE],
                   ])

    else:
      args.extend(["-r", "%d" % params[constants.LDP_RESYNC_RATE]])

    args.append("--create-device")
    result = utils.RunCmd(args)
    if result.failed:
      msg = ("Can't change syncer rate: %s - %s" %
             (result.fail_reason, result.output))
      logging.error(msg)
      return [msg]

    return []

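  # Illustrative sketch (not executed): with dynamic resync disabled and
  # a hypothetical resync rate parameter of 61440, the command built
  # above would be equivalent to running:
  #
  #   drbdsetup /dev/drbd0 syncer -r 61440 --create-device
  #
  # (the exact device path comes from cls._DevPath(minor)).
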
  def SetSyncParams(self, params):
    """Set the synchronization parameters of the DRBD syncer.

    @type params: dict
    @param params: LD level disk parameters related to the synchronization
    @rtype: list
    @return: a list of error messages, emitted both by the current node and by
    children. An empty list means no errors

    """
    if self.minor is None:
      err = "Not attached during SetSyncParams"
      logging.info(err)
      return [err]

    children_result = super(DRBD8, self).SetSyncParams(params)
    children_result.extend(self._SetMinorSyncParams(self.minor, params))
    return children_result

  def PauseResumeSync(self, pause):
    """Pauses or resumes the sync of a DRBD device.

    @param pause: Whether to pause or resume
    @return: the success of the operation

    """
    if self.minor is None:
      logging.info("Not attached during PauseSync")
      return False

    children_result = super(DRBD8, self).PauseResumeSync(pause)

    if pause:
      cmd = "pause-sync"
    else:
      cmd = "resume-sync"

    result = utils.RunCmd(["drbdsetup", self.dev_path, cmd])
    if result.failed:
      logging.error("Can't %s: %s - %s", cmd,
                    result.fail_reason, result.output)
    return not result.failed and children_result

  def GetProcStatus(self):
    """Return device data from /proc.

    """
    if self.minor is None:
      _ThrowError("drbd%d: GetStats() called while not attached", self._aminor)
    proc_info = self._MassageProcData(self._GetProcData())
    if self.minor not in proc_info:
      _ThrowError("drbd%d: can't find myself in /proc", self.minor)
    return DRBD8Status(proc_info[self.minor])

  def GetSyncStatus(self):
    """Returns the sync status of the device.


    If sync_percent is None, it means all is ok
    If estimated_time is None, it means we can't estimate
    the time needed, otherwise it's the time left in seconds.


    We set the is_degraded parameter to True on two conditions:
    network not connected or local disk missing.

    We compute the ldisk parameter based on whether we have a local
    disk or not.

    @rtype: objects.BlockDevStatus

    """
    if self.minor is None and not self.Attach():
      _ThrowError("drbd%d: can't Attach() in GetSyncStatus", self._aminor)

    stats = self.GetProcStatus()
    is_degraded = not stats.is_connected or not stats.is_disk_uptodate

    if stats.is_disk_uptodate:
      ldisk_status = constants.LDS_OKAY
    elif stats.is_diskless:
      ldisk_status = constants.LDS_FAULTY
    else:
      ldisk_status = constants.LDS_UNKNOWN

    return objects.BlockDevStatus(dev_path=self.dev_path,
                                  major=self.major,
                                  minor=self.minor,
                                  sync_percent=stats.sync_percent,
                                  estimated_time=stats.est_time,
                                  is_degraded=is_degraded,
                                  ldisk_status=ldisk_status)

  def Open(self, force=False):
    """Make the local state primary.

    If the 'force' parameter is given, the '-o' option is passed to
    drbdsetup. Since this is a potentially dangerous operation, the
    force flag should be only given after creation, when it actually
    is mandatory.

    """
    if self.minor is None and not self.Attach():
      logging.error("DRBD cannot attach to a device during open")
      return False
    cmd = ["drbdsetup", self.dev_path, "primary"]
    if force:
      cmd.append("-o")
    result = utils.RunCmd(cmd)
    if result.failed:
      _ThrowError("drbd%d: can't make drbd device primary: %s", self.minor,
                  result.output)

  def Close(self):
    """Make the local state secondary.

    This will, of course, fail if the device is in use.

    """
    if self.minor is None and not self.Attach():
      _ThrowError("drbd%d: can't Attach() in Close()", self._aminor)
    result = utils.RunCmd(["drbdsetup", self.dev_path, "secondary"])
    if result.failed:
      _ThrowError("drbd%d: can't switch drbd device to secondary: %s",
                  self.minor, result.output)

  def DisconnectNet(self):
    """Removes network configuration.

    This method shuts down the network side of the device.

    The method will wait up to a hardcoded timeout for the device to
    go into standalone after the 'disconnect' command before
    re-configuring it, as sometimes it takes a while for the
    disconnect to actually propagate and thus we might issue a 'net'
    command while the device is still connected. If the device is
    still attached to the network when we time out, we raise an
    exception.

    """
    if self.minor is None:
      _ThrowError("drbd%d: disk not attached in re-attach net", self._aminor)

    if None in (self._lhost, self._lport, self._rhost, self._rport):
      _ThrowError("drbd%d: DRBD disk missing network info in"
                  " DisconnectNet()", self.minor)

    class _DisconnectStatus:
      def __init__(self, ever_disconnected):
        self.ever_disconnected = ever_disconnected

    dstatus = _DisconnectStatus(_IgnoreError(self._ShutdownNet, self.minor))

    def _WaitForDisconnect():
      if self.GetProcStatus().is_standalone:
        return

      # retry the disconnect, it seems possible that due to a well-timed
      # disconnect on the peer, my disconnect command might be ignored and
      # forgotten
      dstatus.ever_disconnected = \
        _IgnoreError(self._ShutdownNet, self.minor) or dstatus.ever_disconnected

      raise utils.RetryAgain()

    # Keep start time
    start_time = time.time()

    try:
      # Start delay at 100 milliseconds and grow up to 2 seconds
      utils.Retry(_WaitForDisconnect, (0.1, 1.5, 2.0),
                  self._NET_RECONFIG_TIMEOUT)
    except utils.RetryTimeout:
      if dstatus.ever_disconnected:
        msg = ("drbd%d: device did not react to the"
               " 'disconnect' command in a timely manner")
      else:
        msg = "drbd%d: can't shutdown network, even after multiple retries"

      _ThrowError(msg, self.minor)

    reconfig_time = time.time() - start_time
    if reconfig_time > (self._NET_RECONFIG_TIMEOUT * 0.25):
      logging.info("drbd%d: DisconnectNet: detach took %.3f seconds",
                   self.minor, reconfig_time)

  def AttachNet(self, multimaster):
    """Reconnects the network.

    This method connects the network side of the device with a
    specified multi-master flag. The device needs to be 'Standalone'
    but have valid network configuration data.

    Args:
      - multimaster: init the network in dual-primary mode

    """
    if self.minor is None:
      _ThrowError("drbd%d: device not attached in AttachNet", self._aminor)

    if None in (self._lhost, self._lport, self._rhost, self._rport):
      _ThrowError("drbd%d: missing network info in AttachNet()", self.minor)

    status = self.GetProcStatus()

    if not status.is_standalone:
      _ThrowError("drbd%d: device is not standalone in AttachNet", self.minor)

    self._AssembleNet(self.minor,
                      (self._lhost, self._lport, self._rhost, self._rport),
                      constants.DRBD_NET_PROTOCOL, dual_pri=multimaster,
                      hmac=constants.DRBD_HMAC_ALG, secret=self._secret)

  def Attach(self):
    """Check if our minor is configured.

    This doesn't do any device configurations - it only checks if the
    minor is in a state different from Unconfigured.

    Note that this function will not change the state of the system in
    any way (except in case of side-effects caused by reading from
    /proc).

    """
    used_devs = self.GetUsedDevs()
    if self._aminor in used_devs:
      minor = self._aminor
    else:
      minor = None

    self._SetFromMinor(minor)
    return minor is not None

  def Assemble(self):
    """Assemble the drbd.

    Method:
      - if we have a configured device, we try to ensure that it matches
        our config
      - if not, we create it from zero
      - anyway, set the device parameters

    """
    super(DRBD8, self).Assemble()

    self.Attach()
    if self.minor is None:
      # local device completely unconfigured
      self._FastAssemble()
    else:
      # we have to recheck the local and network status and try to fix
      # the device
      self._SlowAssemble()

    sync_errors = self.SetSyncParams(self.params)
    if sync_errors:
      _ThrowError("drbd%d: can't set the synchronization parameters: %s" %
                  (self.minor, utils.CommaJoin(sync_errors)))

  def _SlowAssemble(self):
    """Assembles the DRBD device from a (partially) configured device.

    In case of a partially attached device (local device matches but no
    network setup), we perform the network attach. If successful, we
    re-test whether the attach now returns success.

    """
    # TODO: Rewrite to not use a for loop just because there is 'break'
    # pylint: disable=W0631
    net_data = (self._lhost, self._lport, self._rhost, self._rport)
    for minor in (self._aminor,):
      info = self._GetDevInfo(self._GetShowData(minor))
      match_l = self._MatchesLocal(info)
      match_r = self._MatchesNet(info)

      if match_l and match_r:
        # everything matches
        break

      if match_l and not match_r and "local_addr" not in info:
        # disk matches, but not attached to network, attach and recheck
        self._AssembleNet(minor, net_data, constants.DRBD_NET_PROTOCOL,
                          hmac=constants.DRBD_HMAC_ALG, secret=self._secret)
        if self._MatchesNet(self._GetDevInfo(self._GetShowData(minor))):
          break
        else:
          _ThrowError("drbd%d: network attach successful, but 'drbdsetup"
                      " show' disagrees", minor)

      if match_r and "local_dev" not in info:
        # no local disk, but network attached and it matches
        self._AssembleLocal(minor, self._children[0].dev_path,
                            self._children[1].dev_path, self.size)
        if self._MatchesNet(self._GetDevInfo(self._GetShowData(minor))):
          break
        else:
          _ThrowError("drbd%d: disk attach successful, but 'drbdsetup"
                      " show' disagrees", minor)

      # this case must be considered only if we actually have local
      # storage, i.e. not in diskless mode, because all diskless
      # devices are equal from the point of view of local
      # configuration
      if (match_l and "local_dev" in info and
          not match_r and "local_addr" in info):
        # strange case - the device network part points to somewhere
        # else, even though its local storage is ours; as we own the
        # drbd space, we try to disconnect from the remote peer and
        # reconnect to our correct one
        try:
          self._ShutdownNet(minor)
        except errors.BlockDeviceError, err:
          _ThrowError("drbd%d: device has correct local storage, wrong"
                      " remote peer and is unable to disconnect in order"
                      " to attach to the correct peer: %s", minor, str(err))
        # note: _AssembleNet also handles the case when we don't want
        # local storage (i.e. one or more of the _[lr](host|port) is
        # None)
        self._AssembleNet(minor, net_data, constants.DRBD_NET_PROTOCOL,
                          hmac=constants.DRBD_HMAC_ALG, secret=self._secret)
        if self._MatchesNet(self._GetDevInfo(self._GetShowData(minor))):
          break
        else:
          _ThrowError("drbd%d: network attach successful, but 'drbdsetup"
                      " show' disagrees", minor)

    else:
      minor = None

    self._SetFromMinor(minor)
    if minor is None:
      _ThrowError("drbd%d: cannot activate, unknown or unhandled reason",
                  self._aminor)

  def _FastAssemble(self):
    """Assemble the drbd device from zero.

    This is run when in Assemble we detect our minor is unused.

    """
    minor = self._aminor
    if self._children and self._children[0] and self._children[1]:
      self._AssembleLocal(minor, self._children[0].dev_path,
                          self._children[1].dev_path, self.size)
    if self._lhost and self._lport and self._rhost and self._rport:
      self._AssembleNet(minor,
                        (self._lhost, self._lport, self._rhost, self._rport),
                        constants.DRBD_NET_PROTOCOL,
                        hmac=constants.DRBD_HMAC_ALG, secret=self._secret)
    self._SetFromMinor(minor)

  @classmethod
  def _ShutdownLocal(cls, minor):
    """Detach from the local device.

    I/Os will continue to be served from the remote device. If we
    don't have a remote device, this operation will fail.

    """
    result = utils.RunCmd(["drbdsetup", cls._DevPath(minor), "detach"])
    if result.failed:
      _ThrowError("drbd%d: can't detach local disk: %s", minor, result.output)

  @classmethod
  def _ShutdownNet(cls, minor):
    """Disconnect from the remote peer.

    This fails if we don't have a local device.

    """
    result = utils.RunCmd(["drbdsetup", cls._DevPath(minor), "disconnect"])
    if result.failed:
      _ThrowError("drbd%d: can't shutdown network: %s", minor, result.output)

  @classmethod
  def _ShutdownAll(cls, minor):
    """Deactivate the device.

    This will, of course, fail if the device is in use.

    """
    result = utils.RunCmd(["drbdsetup", cls._DevPath(minor), "down"])
    if result.failed:
      _ThrowError("drbd%d: can't shutdown drbd device: %s",
                  minor, result.output)

  def Shutdown(self):
    """Shutdown the DRBD device.

    """
    if self.minor is None and not self.Attach():
      logging.info("drbd%d: not attached during Shutdown()", self._aminor)
      return
    minor = self.minor
    self.minor = None
    self.dev_path = None
    self._ShutdownAll(minor)

  def Remove(self):
    """Stub remove for DRBD devices.

    """
    self.Shutdown()

  @classmethod
  def Create(cls, unique_id, children, size, params):
    """Create a new DRBD8 device.

    Since DRBD devices are not created per se, just assembled, this
    function only initializes the metadata.

    """
    if len(children) != 2:
      raise errors.ProgrammerError("Invalid setup for the drbd device")
    # check that the minor is unused
    aminor = unique_id[4]
    proc_info = cls._MassageProcData(cls._GetProcData())
    if aminor in proc_info:
      status = DRBD8Status(proc_info[aminor])
      in_use = status.is_in_use
    else:
      in_use = False
    if in_use:
      _ThrowError("drbd%d: minor is already in use at Create() time", aminor)
    meta = children[1]
    meta.Assemble()
    if not meta.Attach():
      _ThrowError("drbd%d: can't attach to meta device '%s'",
                  aminor, meta)
    cls._CheckMetaSize(meta.dev_path)
    cls._InitMeta(aminor, meta.dev_path)
    return cls(unique_id, children, size, params)

  def Grow(self, amount, dryrun, backingstore):
    """Resize the DRBD device and its backing storage.

    """
    if self.minor is None:
      _ThrowError("drbd%d: Grow called while not attached", self._aminor)
    if len(self._children) != 2 or None in self._children:
      _ThrowError("drbd%d: cannot grow diskless device", self.minor)
    self._children[0].Grow(amount, dryrun, backingstore)
    if dryrun or backingstore:
      # DRBD does not support dry-run mode and is not backing storage,
      # so we'll return here
      return
    result = utils.RunCmd(["drbdsetup", self.dev_path, "resize", "-s",
                           "%dm" % (self.size + amount)])
    if result.failed:
      _ThrowError("drbd%d: resize failed: %s", self.minor, result.output)
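  # Illustrative sketch (not executed): growing an attached device with
  # a hypothetical minor 0 by 1024 MiB, with dryrun=False and
  # backingstore=False, first grows the data child (self._children[0])
  # and then, assuming a current size of 10240 MiB, runs something
  # equivalent to:
  #
  #   drbdsetup /dev/drbd0 resize -s 11264m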


class FileStorage(BlockDev):
  """File device.

  This class represents a file storage backend device.

  The unique_id for the file device is a (file_driver, file_path) tuple.

  """
  def __init__(self, unique_id, children, size, params):
    """Initializes a file device backend.

    """
    if children:
      raise errors.BlockDeviceError("Invalid setup for file device")
    super(FileStorage, self).__init__(unique_id, children, size, params)
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
      raise ValueError("Invalid configuration data %s" % str(unique_id))
    self.driver = unique_id[0]
    self.dev_path = unique_id[1]

    CheckFileStoragePath(self.dev_path)

    self.Attach()

  def Assemble(self):
    """Assemble the device.

    Checks whether the file device exists; raises BlockDeviceError if not.

    """
    if not os.path.exists(self.dev_path):
      _ThrowError("File device '%s' does not exist" % self.dev_path)

  def Shutdown(self):
    """Shutdown the device.

    This is a no-op for the file type, as we don't deactivate
    the file on shutdown.

    """
    pass

  def Open(self, force=False):
    """Make the device ready for I/O.

    This is a no-op for the file type.

    """
    pass

  def Close(self):
    """Notifies that the device will no longer be used for I/O.

    This is a no-op for the file type.

    """
    pass

  def Remove(self):
    """Remove the file backing the block device.

    @rtype: boolean
    @return: True if the removal was successful

    """
    try:
      os.remove(self.dev_path)
    except OSError, err:
      if err.errno != errno.ENOENT:
        _ThrowError("Can't remove file '%s': %s", self.dev_path, err)

  def Rename(self, new_id):
    """Renames the file.

    """
    # TODO: implement rename for file-based storage
    _ThrowError("Rename is not supported for file-based storage")

  def Grow(self, amount, dryrun, backingstore):
    """Grow the file

    @param amount: the amount (in mebibytes) to grow with

    """
    if not backingstore:
      return
    # Check that the file exists
    self.Assemble()
    current_size = self.GetActualSize()
    new_size = current_size + amount * 1024 * 1024
    assert new_size > current_size, "Cannot Grow with a negative amount"
    # We can't really simulate the growth
    if dryrun:
      return
    try:
      f = open(self.dev_path, "a+")
      f.truncate(new_size)
      f.close()
    except EnvironmentError, err:
      _ThrowError("Error in file growth: %s", str(err))

  def Attach(self):
    """Attach to an existing file.

    Check if this file already exists.

    @rtype: boolean
    @return: True if file exists

    """
    self.attached = os.path.exists(self.dev_path)
    return self.attached

  def GetActualSize(self):
    """Return the actual disk size.

    @note: the device needs to be active when this is called

    """
    assert self.attached, "BlockDevice not attached in GetActualSize()"
    try:
      st = os.stat(self.dev_path)
      return st.st_size
    except OSError, err:
      _ThrowError("Can't stat %s: %s", self.dev_path, err)

  @classmethod
  def Create(cls, unique_id, children, size, params):
    """Create a new file.

    @param size: the size of file in MiB

    @rtype: L{bdev.FileStorage}
    @return: an instance of FileStorage

    """
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
      raise ValueError("Invalid configuration data %s" % str(unique_id))

    dev_path = unique_id[1]

    CheckFileStoragePath(dev_path)

    try:
      fd = os.open(dev_path, os.O_RDWR | os.O_CREAT | os.O_EXCL)
      f = os.fdopen(fd, "w")
      f.truncate(size * 1024 * 1024)
      f.close()
    except EnvironmentError, err:
      if err.errno == errno.EEXIST:
        _ThrowError("File already existing: %s", dev_path)
      _ThrowError("Error in file creation: %s", str(err))

    return FileStorage(unique_id, children, size, params)


class PersistentBlockDevice(BlockDev):
  """A block device with persistent node

  May be either directly attached, or exposed through DM (e.g. dm-multipath).
  udev helpers are probably required to give persistent, human-friendly
  names.

  For the time being, pathnames are required to lie under /dev.

  """
  def __init__(self, unique_id, children, size, params):
    """Attaches to a static block device.

    The unique_id is a path under /dev.

    """
    super(PersistentBlockDevice, self).__init__(unique_id, children, size,
                                                params)
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
      raise ValueError("Invalid configuration data %s" % str(unique_id))
    self.dev_path = unique_id[1]
    if not os.path.realpath(self.dev_path).startswith("/dev/"):
      raise ValueError("Full path '%s' lies outside /dev" %
                              os.path.realpath(self.dev_path))
    # TODO: this is just a safety guard checking that we only deal with devices
    # we know how to handle. In the future this will be integrated with
    # external storage backends and possible values will probably be collected
    # from the cluster configuration.
    if unique_id[0] != constants.BLOCKDEV_DRIVER_MANUAL:
      raise ValueError("Got persistent block device of invalid type: %s" %
                       unique_id[0])

    self.major = self.minor = None
    self.Attach()

  @classmethod
  def Create(cls, unique_id, children, size, params):
    """Create a new device

    This is a noop, we only return a PersistentBlockDevice instance

    """
    return PersistentBlockDevice(unique_id, children, 0, params)

  def Remove(self):
    """Remove a device

    This is a noop

    """
    pass

  def Rename(self, new_id):
    """Rename this device.

    """
    _ThrowError("Rename is not supported for PersistentBlockDev storage")

  def Attach(self):
    """Attach to an existing block device.


    """
    self.attached = False
    try:
      st = os.stat(self.dev_path)
    except OSError, err:
      logging.error("Error stat()'ing %s: %s", self.dev_path, str(err))
      return False

    if not stat.S_ISBLK(st.st_mode):
      logging.error("%s is not a block device", self.dev_path)
      return False

    self.major = os.major(st.st_rdev)
    self.minor = os.minor(st.st_rdev)
    self.attached = True

    return True

  def Assemble(self):
    """Assemble the device.

    """
    pass

  def Shutdown(self):
    """Shutdown the device.

    """
    pass

  def Open(self, force=False):
    """Make the device ready for I/O.

    """
    pass

  def Close(self):
    """Notifies that the device will no longer be used for I/O.

    """
    pass

  def Grow(self, amount, dryrun, backingstore):
    """Grow the logical volume.

    """
    _ThrowError("Grow is not supported for PersistentBlockDev storage")


class RADOSBlockDevice(BlockDev):
  """A RADOS Block Device (rbd).

  This class implements the RADOS Block Device for the backend. You need
  the rbd kernel driver, the RADOS Tools and a working RADOS cluster for
  this to be functional.

  """
  def __init__(self, unique_id, children, size, params):
    """Attaches to an rbd device.

    """
    super(RADOSBlockDevice, self).__init__(unique_id, children, size, params)
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
      raise ValueError("Invalid configuration data %s" % str(unique_id))

    self.driver, self.rbd_name = unique_id

    self.major = self.minor = None
    self.Attach()

  @classmethod
  def Create(cls, unique_id, children, size, params):
    """Create a new rbd device.

    Provision a new rbd volume inside a RADOS pool.

    """
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
      raise errors.ProgrammerError("Invalid configuration data %s" %
                                   str(unique_id))
    rbd_pool = params[constants.LDP_POOL]
    rbd_name = unique_id[1]

    # Provision a new rbd volume (Image) inside the RADOS cluster.
    cmd = [constants.RBD_CMD, "create", "-p", rbd_pool,
           rbd_name, "--size", "%s" % size]
    result = utils.RunCmd(cmd)
    if result.failed:
      _ThrowError("rbd creation failed (%s): %s",
                  result.fail_reason, result.output)

    return RADOSBlockDevice(unique_id, children, size, params)

  def Remove(self):
    """Remove the rbd device.

    """
    rbd_pool = self.params[constants.LDP_POOL]
    rbd_name = self.unique_id[1]

    if not self.minor and not self.Attach():
      # The rbd device doesn't exist.
      return

    # First shutdown the device (remove mappings).
    self.Shutdown()

    # Remove the actual Volume (Image) from the RADOS cluster.
    cmd = [constants.RBD_CMD, "rm", "-p", rbd_pool, rbd_name]
    result = utils.RunCmd(cmd)
    if result.failed:
      _ThrowError("Can't remove Volume from cluster with rbd rm: %s - %s",
                  result.fail_reason, result.output)

  def Rename(self, new_id):
    """Rename this device.

    """
    pass

  def Attach(self):
    """Attach to an existing rbd device.

    This method maps the rbd volume that matches our name with
    an rbd device and then attaches to this device.

    """
    self.attached = False

    # Map the rbd volume to a block device under /dev
    self.dev_path = self._MapVolumeToBlockdev(self.unique_id)

    try:
      st = os.stat(self.dev_path)
    except OSError, err:
      logging.error("Error stat()'ing %s: %s", self.dev_path, str(err))
      return False

    if not stat.S_ISBLK(st.st_mode):
      logging.error("%s is not a block device", self.dev_path)
      return False

    self.major = os.major(st.st_rdev)
    self.minor = os.minor(st.st_rdev)
    self.attached = True

    return True

  def _MapVolumeToBlockdev(self, unique_id):
    """Maps existing rbd volumes to block devices.

    This method should be idempotent if the mapping already exists.

    @rtype: string
    @return: the block device path that corresponds to the volume

    """
    pool = self.params[constants.LDP_POOL]
    name = unique_id[1]

    # Check if the mapping already exists.
    showmap_cmd = [constants.RBD_CMD, "showmapped", "-p", pool]
    result = utils.RunCmd(showmap_cmd)
    if result.failed:
      _ThrowError("rbd showmapped failed (%s): %s",
                  result.fail_reason, result.output)

    rbd_dev = self._ParseRbdShowmappedOutput(result.output, name)

    if rbd_dev:
      # The mapping exists. Return it.
      return rbd_dev

    # The mapping doesn't exist. Create it.
    map_cmd = [constants.RBD_CMD, "map", "-p", pool, name]
    result = utils.RunCmd(map_cmd)
    if result.failed:
      _ThrowError("rbd map failed (%s): %s",
                  result.fail_reason, result.output)

    # Find the corresponding rbd device.
    showmap_cmd = [constants.RBD_CMD, "showmapped", "-p", pool]
    result = utils.RunCmd(showmap_cmd)
    if result.failed:
      _ThrowError("rbd map succeeded, but showmapped failed (%s): %s",
                  result.fail_reason, result.output)

    rbd_dev = self._ParseRbdShowmappedOutput(result.output, name)

    if not rbd_dev:
      _ThrowError("rbd map succeeded, but could not find the rbd block"
                  " device in output of showmapped, for volume: %s", name)

    # The device was successfully mapped. Return it.
    return rbd_dev

  @staticmethod
  def _ParseRbdShowmappedOutput(output, volume_name):
    """Parse the output of `rbd showmapped'.

    This method parses the output of `rbd showmapped' and returns
    the rbd block device path (e.g. /dev/rbd0) that matches the
    given rbd volume.

    @type output: string
    @param output: the whole output of `rbd showmapped'
    @type volume_name: string
    @param volume_name: the name of the volume whose device we search for
    @rtype: string or None
    @return: block device path if the volume is mapped, else None

    """
    allfields = 5
    volumefield = 2
    devicefield = 4

    field_sep = "\t"

    lines = output.splitlines()
    splitted_lines = map(lambda l: l.split(field_sep), lines)

    # Check empty output.
    if not splitted_lines:
      _ThrowError("rbd showmapped returned empty output")

    # Check showmapped header line, to determine number of fields.
    field_cnt = len(splitted_lines[0])
    if field_cnt != allfields:
      _ThrowError("Cannot parse rbd showmapped output because its format"
                  " seems to have changed; expected %s fields, found %s",
                  allfields, field_cnt)

    matched_lines = \
      filter(lambda l: len(l) == allfields and l[volumefield] == volume_name,
             splitted_lines)

    if len(matched_lines) > 1:
      _ThrowError("The rbd volume %s is mapped more than once."
                  " This shouldn't happen, try to unmap the extra"
                  " devices manually.", volume_name)

    if matched_lines:
      # rbd block device found. Return it.
      rbd_dev = matched_lines[0][devicefield]
      return rbd_dev

    # The given volume is not mapped.
    return None

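  # Illustrative sketch (not executed): given tab-separated `rbd
  # showmapped' output of the shape assumed above (five fields, volume
  # name in the third column, device in the fifth), e.g.:
  #
  #   id<TAB>pool<TAB>image<TAB>snap<TAB>device
  #   0<TAB>rbd<TAB>my-volume<TAB>-<TAB>/dev/rbd0
  #
  # _ParseRbdShowmappedOutput(output, "my-volume") would return
  # "/dev/rbd0", while an unmapped volume name would yield None.
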
  def Assemble(self):
    """Assemble the device.

    """
    pass

  def Shutdown(self):
    """Shutdown the device.

    """
    if not self.minor and not self.Attach():
      # The rbd device doesn't exist.
      return

    # Unmap the block device from the Volume.
    self._UnmapVolumeFromBlockdev(self.unique_id)

    self.minor = None
    self.dev_path = None

  def _UnmapVolumeFromBlockdev(self, unique_id):
    """Unmaps the rbd device from the Volume it is mapped to.

    Unmaps the rbd device from the Volume it was previously mapped to.
    This method should be idempotent if the Volume isn't mapped.

    """
    pool = self.params[constants.LDP_POOL]
    name = unique_id[1]

    # Check if the mapping already exists.
    showmap_cmd = [constants.RBD_CMD, "showmapped", "-p", pool]
    result = utils.RunCmd(showmap_cmd)
    if result.failed:
      _ThrowError("rbd showmapped failed [during unmap](%s): %s",
                  result.fail_reason, result.output)

    rbd_dev = self._ParseRbdShowmappedOutput(result.output, name)

    if rbd_dev:
      # The mapping exists. Unmap the rbd device.
      unmap_cmd = [constants.RBD_CMD, "unmap", "%s" % rbd_dev]
      result = utils.RunCmd(unmap_cmd)
      if result.failed:
        _ThrowError("rbd unmap failed (%s): %s",
                    result.fail_reason, result.output)

  def Open(self, force=False):
    """Make the device ready for I/O.

    """
    pass

  def Close(self):
    """Notifies that the device will no longer be used for I/O.

    """
    pass

  def Grow(self, amount, dryrun, backingstore):
    """Grow the Volume.

    @type amount: integer
    @param amount: the amount (in mebibytes) to grow with
    @type dryrun: boolean
    @param dryrun: whether to execute the operation in simulation mode
        only, without actually increasing the size

    """
    if not backingstore:
      return
    if not self.Attach():
      _ThrowError("Can't attach to rbd device during Grow()")

    if dryrun:
      # the rbd tool does not support dry runs of resize operations.
      # Since rbd volumes are thinly provisioned, we assume
      # there is always enough free space for the operation.
      return

    rbd_pool = self.params[constants.LDP_POOL]
    rbd_name = self.unique_id[1]
    new_size = self.size + amount

    # Resize the rbd volume (Image) inside the RADOS cluster.
    cmd = [constants.RBD_CMD, "resize", "-p", rbd_pool,
           rbd_name, "--size", "%s" % new_size]
    result = utils.RunCmd(cmd)
    if result.failed:
      _ThrowError("rbd resize failed (%s): %s",
                  result.fail_reason, result.output)


class ExtStorageDevice(BlockDev):
  """A block device provided by an ExtStorage Provider.

  This class implements the External Storage Interface, which means
  handling of the externally provided block devices.

  """
  def __init__(self, unique_id, children, size, params):
    """Attaches to an extstorage block device.

    """
    super(ExtStorageDevice, self).__init__(unique_id, children, size, params)
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
      raise ValueError("Invalid configuration data %s" % str(unique_id))

    self.driver, self.vol_name = unique_id

    self.major = self.minor = None
    self.Attach()

  @classmethod
  def Create(cls, unique_id, children, size, params):
    """Create a new extstorage device.

    Provision a new volume using an extstorage provider, which will
    then be mapped to a block device.

    """
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
      raise errors.ProgrammerError("Invalid configuration data %s" %
                                   str(unique_id))

    # Call the External Storage's create script,
    # to provision a new Volume inside the External Storage
    _ExtStorageAction(constants.ES_ACTION_CREATE, unique_id, str(size))

    return ExtStorageDevice(unique_id, children, size, params)

  def Remove(self):
    """Remove the extstorage device.

    """
    if not self.minor and not self.Attach():
      # The extstorage device doesn't exist.
      return

    # First shutdown the device (remove mappings).
    self.Shutdown()

    # Call the External Storage's remove script,
    # to remove the Volume from the External Storage
    _ExtStorageAction(constants.ES_ACTION_REMOVE, self.unique_id)

  def Rename(self, new_id):
    """Rename this device.

    """
    pass

  def Attach(self):
    """Attach to an existing extstorage device.

    This method maps the extstorage volume that matches our name with
    a corresponding block device and then attaches to this device.

    """
    self.attached = False

    # Call the External Storage's attach script,
    # to attach an existing Volume to a block device under /dev
    self.dev_path = _ExtStorageAction(constants.ES_ACTION_ATTACH,
                                      self.unique_id)

    try:
      st = os.stat(self.dev_path)
    except OSError, err:
      logging.error("Error stat()'ing %s: %s", self.dev_path, str(err))
      return False

    if not stat.S_ISBLK(st.st_mode):
      logging.error("%s is not a block device", self.dev_path)
      return False

    self.major = os.major(st.st_rdev)
    self.minor = os.minor(st.st_rdev)
    self.attached = True

    return True

  def Assemble(self):
    """Assemble the device.

    """
    pass

  def Shutdown(self):
    """Shutdown the device.

    """
    if not self.minor and not self.Attach():
      # The extstorage device doesn't exist.
      return

    # Call the External Storage's detach script,
    # to detach an existing Volume from its block device under /dev
    _ExtStorageAction(constants.ES_ACTION_DETACH, self.unique_id)

    self.minor = None
    self.dev_path = None

  def Open(self, force=False):
    """Make the device ready for I/O.

    """
    pass

  def Close(self):
    """Notifies that the device will no longer be used for I/O.

    """
    pass

  def Grow(self, amount, dryrun, backingstore):
    """Grow the Volume.

    @type amount: integer
    @param amount: the amount (in mebibytes) to grow with
    @type dryrun: boolean
    @param dryrun: whether to execute the operation in simulation mode
        only, without actually increasing the size

    """
    if not backingstore:
      return
    if not self.Attach():
      _ThrowError("Can't attach to extstorage device during Grow()")

    if dryrun:
      # we do not support dry runs of resize operations for now.
      return

    new_size = self.size + amount

    # Call the External Storage's grow script,
    # to grow an existing Volume inside the External Storage
    _ExtStorageAction(constants.ES_ACTION_GROW, self.unique_id,
                      str(self.size), grow=str(new_size))

  def SetInfo(self, text):
    """Update metadata with info text.

    """
    # Replace invalid characters
    text = re.sub("^[^A-Za-z0-9_+.]", "_", text)
    text = re.sub("[^-A-Za-z0-9_+.]", "_", text)

    # Only up to 128 characters are allowed
    text = text[:128]

    # Call the External Storage's setinfo script,
    # to set metadata for an existing Volume inside the External Storage
    _ExtStorageAction(constants.ES_ACTION_SETINFO, self.unique_id,
                      metadata=text)


def _ExtStorageAction(action, unique_id, size=None, grow=None, metadata=None):
  """Take an External Storage action.

  Take an External Storage action concerning or affecting
  a specific Volume inside the External Storage.

  @type action: string
  @param action: which action to perform. One of:
                 create / remove / grow / attach / detach / setinfo
  @type unique_id: tuple (driver, vol_name)
  @param unique_id: a tuple containing the type of ExtStorage (driver)
                    and the Volume name
  @type size: integer
  @param size: the size of the Volume in mebibytes
  @type grow: integer
  @param grow: the new size in mebibytes (after grow)
  @type metadata: string
  @param metadata: metadata info of the Volume, for use by the provider
  @rtype: None or a block device path (during attach)

  """
  driver, vol_name = unique_id

  # Create an External Storage instance of type `driver'
  status, inst_es = ExtStorageFromDisk(driver)
  if not status:
    _ThrowError("%s" % inst_es)

  # Create the basic environment for the driver's scripts
  create_env = _ExtStorageEnvironment(unique_id, size, grow, metadata)

  # Do not use log file for action `attach' as we need
  # to get the output from RunResult
  # TODO: find a way to have a log file for attach too
  logfile = None
  if action is not constants.ES_ACTION_ATTACH:
    logfile = _VolumeLogName(action, driver, vol_name)

  # Make sure the given action results in a valid script
  if action not in constants.ES_SCRIPTS:
    _ThrowError("Action '%s' doesn't result in a valid ExtStorage script" %
                action)

  # Find out which external script to run according to the given action
  script_name = action + "_script"
  script = getattr(inst_es, script_name)

  # Run the external script
  result = utils.RunCmd([script], env=create_env,
                        cwd=inst_es.path, output=logfile,)
  if result.failed:
    logging.error("External storage's %s command '%s' returned"
                  " error: %s, logfile: %s, output: %s",
                  action, result.cmd, result.fail_reason,
                  logfile, result.output)

    # If logfile is 'None' (during attach), it breaks TailFile
    # TODO: have a log file for attach too
    if action is not constants.ES_ACTION_ATTACH:
      lines = [utils.SafeEncode(val)
               for val in utils.TailFile(logfile, lines=20)]
    else:
      lines = result.output[-20:]

    _ThrowError("External storage's %s script failed (%s), last"
                " lines of output:\n%s",
                action, result.fail_reason, "\n".join(lines))

  if action == constants.ES_ACTION_ATTACH:
    return result.stdout


def ExtStorageFromDisk(name, base_dir=None):
  """Create an ExtStorage instance from disk.

  This function will return an ExtStorage instance
  if the given name is a valid ExtStorage name.

  @type base_dir: string
  @keyword base_dir: Base directory containing ExtStorage installations.
                     Defaults to a search in all the ES_SEARCH_PATH dirs.
  @rtype: tuple
  @return: True and the ExtStorage instance if we find a valid one, or
      False and the diagnose message on error

  """
  if base_dir is None:
    es_base_dir = pathutils.ES_SEARCH_PATH
  else:
    es_base_dir = [base_dir]

  es_dir = utils.FindFile(name, es_base_dir, os.path.isdir)

  if es_dir is None:
    return False, ("Directory for External Storage Provider %s not"
                   " found in search path" % name)

  # ES files dictionary; we will populate it with the absolute path
  # names. If the value is True, then it is a required file, otherwise
  # an optional one
  es_files = dict.fromkeys(constants.ES_SCRIPTS, True)

  for filename in es_files:
    es_files[filename] = utils.PathJoin(es_dir, filename)

    try:
      st = os.stat(es_files[filename])
    except EnvironmentError, err:
      return False, ("File '%s' under path '%s' is missing (%s)" %
                     (filename, es_dir, utils.ErrnoOrStr(err)))

    if not stat.S_ISREG(stat.S_IFMT(st.st_mode)):
      return False, ("File '%s' under path '%s' is not a regular file" %
                     (filename, es_dir))

    if filename in constants.ES_SCRIPTS:
      if stat.S_IMODE(st.st_mode) & stat.S_IXUSR != stat.S_IXUSR:
        return False, ("File '%s' under path '%s' is not executable" %
                       (filename, es_dir))

  es_obj = \
    objects.ExtStorage(name=name, path=es_dir,
                       create_script=es_files[constants.ES_SCRIPT_CREATE],
                       remove_script=es_files[constants.ES_SCRIPT_REMOVE],
                       grow_script=es_files[constants.ES_SCRIPT_GROW],
                       attach_script=es_files[constants.ES_SCRIPT_ATTACH],
                       detach_script=es_files[constants.ES_SCRIPT_DETACH],
                       setinfo_script=es_files[constants.ES_SCRIPT_SETINFO])
  return True, es_obj
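

# Illustrative sketch only (an addition, not upstream code): how the
# (status, payload) convention returned by ExtStorageFromDisk is typically
# consumed. The provider name "ext_demo" is an assumption.
def _ExampleExtStorageFromDiskUsage():
  """Sketch only: load an assumed provider and report where it lives.

  """
  status, payload = ExtStorageFromDisk("ext_demo")
  if not status:
    # On failure the payload is the diagnose message
    _ThrowError("Cannot load ExtStorage provider: %s", payload)
  # On success the payload is an objects.ExtStorage instance
  logging.info("Provider scripts found under %s", payload.path)
  return payload

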
def _ExtStorageEnvironment(unique_id, size=None, grow=None, metadata=None):
  """Calculate the environment for an External Storage script.

  @type unique_id: tuple (driver, vol_name)
  @param unique_id: ExtStorage pool and name of the Volume
  @type size: string
  @param size: size of the Volume (in mebibytes)
  @type grow: string
  @param grow: new size of Volume after grow (in mebibytes)
  @type metadata: string
  @param metadata: metadata info of the Volume
  @rtype: dict
  @return: dict of environment variables

  """
  vol_name = unique_id[1]

  result = {}
  result["VOL_NAME"] = vol_name

  if size is not None:
    result["VOL_SIZE"] = size

  if grow is not None:
    result["VOL_NEW_SIZE"] = grow

  if metadata is not None:
    result["VOL_METADATA"] = metadata

  return result
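

# For illustration only (not part of the original file): the environment a
# grow operation would receive for an assumed volume. Only the keys whose
# corresponding arguments were supplied end up in the dict.
def _ExampleExtStorageEnvironment():
  """Sketch only: environment for growing an assumed volume to 2048 MiB.

  """
  env = _ExtStorageEnvironment(("ext_demo", "vol-demo-0"), size=1024,
                               grow=2048)
  # env now carries VOL_NAME, VOL_SIZE and VOL_NEW_SIZE; VOL_METADATA is
  # absent because no metadata argument was given
  return env

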
def _VolumeLogName(kind, es_name, volume):
  """Compute the ExtStorage log filename for a given Volume and operation.

  @type kind: string
  @param kind: the operation type (e.g. create, remove etc.)
  @type es_name: string
  @param es_name: the ExtStorage name
  @type volume: string
  @param volume: the name of the Volume inside the External Storage

  """
  # Check if the extstorage log dir is a valid dir
  if not os.path.isdir(pathutils.LOG_ES_DIR):
    _ThrowError("Cannot find log directory: %s", pathutils.LOG_ES_DIR)

  # TODO: Use tempfile.mkstemp to create unique filename
  base = ("%s-%s-%s-%s.log" %
          (kind, es_name, volume, utils.TimestampForFilename()))
  return utils.PathJoin(pathutils.LOG_ES_DIR, base)
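

# Illustrative addition, not upstream code: given the naming scheme above, a
# create operation for an assumed provider/volume logs to something like
# "<LOG_ES_DIR>/create-ext_demo-vol-demo-0-<timestamp>.log".
def _ExampleVolumeLogName():
  """Sketch only: compute the log file name for an assumed create action.

  """
  return _VolumeLogName(constants.ES_ACTION_CREATE, "ext_demo", "vol-demo-0")

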
DEV_MAP = {
  constants.LD_LV: LogicalVolume,
  constants.LD_DRBD8: DRBD8,
  constants.LD_BLOCKDEV: PersistentBlockDevice,
  constants.LD_RBD: RADOSBlockDevice,
  constants.LD_EXT: ExtStorageDevice,
  }

if constants.ENABLE_FILE_STORAGE or constants.ENABLE_SHARED_FILE_STORAGE:
  DEV_MAP[constants.LD_FILE] = FileStorage


def _VerifyDiskType(dev_type):
  """Make sure the given device type is known to this module.

  """
  if dev_type not in DEV_MAP:
    raise errors.ProgrammerError("Invalid block device type '%s'" % dev_type)


def _VerifyDiskParams(disk):
  """Verifies that all disk parameters are set.

  """
  missing = set(constants.DISK_LD_DEFAULTS[disk.dev_type]) - set(disk.params)
  if missing:
    raise errors.ProgrammerError("Block device is missing disk parameters: %s" %
                                 missing)


def FindDevice(disk, children):
  """Search for an existing, assembled device.

  This will succeed only if the device exists and is assembled, but it
  does not do any actions in order to activate the device.

  @type disk: L{objects.Disk}
  @param disk: the disk object to find
  @type children: list of L{bdev.BlockDev}
  @param children: the list of block devices that are children of the device
                   represented by the disk parameter

  """
  _VerifyDiskType(disk.dev_type)
  device = DEV_MAP[disk.dev_type](disk.physical_id, children, disk.size,
                                  disk.params)
  if not device.attached:
    return None
  return device


def Assemble(disk, children):
  """Try to attach or assemble an existing device.

  This will attach or assemble the device, as needed, to bring it
  fully up. It must be safe to run on already-assembled devices.

  @type disk: L{objects.Disk}
  @param disk: the disk object to assemble
  @type children: list of L{bdev.BlockDev}
  @param children: the list of block devices that are children of the device
                   represented by the disk parameter

  """
  _VerifyDiskType(disk.dev_type)
  _VerifyDiskParams(disk)
  device = DEV_MAP[disk.dev_type](disk.physical_id, children, disk.size,
                                  disk.params)
  device.Assemble()
  return device


def Create(disk, children):
  """Create a device.

  @type disk: L{objects.Disk}
  @param disk: the disk object to create
  @type children: list of L{bdev.BlockDev}
  @param children: the list of block devices that are children of the device
                   represented by the disk parameter

  """
  _VerifyDiskType(disk.dev_type)
  _VerifyDiskParams(disk)
  device = DEV_MAP[disk.dev_type].Create(disk.physical_id, children, disk.size,
                                         disk.params)
  return device
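

# The function below is an illustrative sketch and not part of the original
# module: it shows how the FindDevice/Assemble/Create factories are meant to
# be driven with an objects.Disk description. The volume group "xenvg", the
# LV name and the 1024 MiB size are assumptions; real callers (e.g. the node
# daemon code) build the Disk object from the cluster configuration instead.
def _ExampleCreateAndAssemble():
  """Sketch only: create an LVM-backed disk, assemble it and look it up.

  """
  lv_id = ("xenvg", "example-disk.data")  # (vg_name, lv_name), assumed names
  disk = objects.Disk(dev_type=constants.LD_LV, size=1024,
                      logical_id=lv_id, physical_id=lv_id,
                      params=constants.DISK_LD_DEFAULTS[constants.LD_LV])
  Create(disk, [])        # dispatches to LogicalVolume.Create via DEV_MAP
  Assemble(disk, [])      # safe to call on an already-assembled device
  return FindDevice(disk, [])  # None if the device is not attached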