Statistics
| Branch: | Tag: | Revision:

root / lib / bdev.py @ 91c17910

History | View | Annotate | Download (109.1 kB)

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2010, 2011, 2012, 2013 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Block device abstraction"""
23

    
24
import re
25
import time
26
import errno
27
import shlex
28
import stat
29
import pyparsing as pyp
30
import os
31
import logging
32
import math
33

    
34
from ganeti import utils
35
from ganeti import errors
36
from ganeti import constants
37
from ganeti import objects
38
from ganeti import compat
39
from ganeti import netutils
40
from ganeti import pathutils
41
from ganeti import serializer
42

    
43

    
44
# Size of reads in _CanReadDevice
45
_DEVICE_READ_SIZE = 128 * 1024
46

    
47

    
48
class RbdShowmappedJsonError(Exception):
  """`rbd showmapped' JSON formatting error Exception class.

  """
  pass
53

    
54

    
55
def _IgnoreError(fn, *args, **kwargs):
56
  """Executes the given function, ignoring BlockDeviceErrors.
57

58
  This is used in order to simplify the execution of cleanup or
59
  rollback functions.
60

61
  @rtype: boolean
62
  @return: True when fn didn't raise an exception, False otherwise
63

64
  """
65
  try:
66
    fn(*args, **kwargs)
67
    return True
68
  except errors.BlockDeviceError, err:
69
    logging.warning("Caught BlockDeviceError but ignoring: %s", str(err))
70
    return False
71

    
72

    
73
def _ThrowError(msg, *args):
  """Log an error message and raise it as a block device error.

  @type msg: string
  @param msg: the text of the exception; %-interpolated with C{args}
      when any are given
  @raise errors.BlockDeviceError: always, carrying the formatted message

  """
  text = msg % args if args else msg
  logging.error(text)
  raise errors.BlockDeviceError(text)
85

    
86

    
87
def _CheckResult(result):
  """Throws an error if the given result is a failed one.

  @param result: result from RunCmd

  """
  if not result.failed:
    return
  _ThrowError("Command: %s error: %s - %s", result.cmd, result.fail_reason,
              result.output)
96

    
97

    
98
def _CanReadDevice(path):
  """Check whether the given device is readable.

  Attempts to read the first L{_DEVICE_READ_SIZE} bytes (128k) of the
  device.

  @rtype: boolean

  """
  try:
    utils.ReadFile(path, size=_DEVICE_READ_SIZE)
  except EnvironmentError:
    logging.warning("Can't read from device %s", path, exc_info=True)
    return False
  return True
110

    
111

    
112
def _GetForbiddenFileStoragePaths():
  """Builds a list of path prefixes which shouldn't be used for file storage.

  @rtype: frozenset

  """
  paths = set([
    "/boot",
    "/dev",
    "/etc",
    "/home",
    "/proc",
    "/root",
    "/sys",
    ])

  # Binary/library directories, both at the root and under /usr[/local]
  for prefix in ["", "/usr", "/usr/local"]:
    for subdir in ["bin", "lib", "lib32", "lib64", "sbin"]:
      paths.add("%s/%s" % (prefix, subdir))

  return compat.UniqueFrozenset(map(os.path.normpath, paths))
133

    
134

    
135
def _ComputeWrongFileStoragePaths(paths,
                                  _forbidden=_GetForbiddenFileStoragePaths()):
  """Cross-checks a list of paths for prefixes considered bad.

  Some paths, e.g. "/bin", should not be used for file storage.

  @type paths: list
  @param paths: List of paths to be checked
  @rtype: list
  @return: Sorted list of paths for which the user should be warned

  """
  def _IsWrong(path):
    # Bad when relative, exactly a forbidden prefix, or located below
    # one of the forbidden prefixes
    if not os.path.isabs(path) or path in _forbidden:
      return True
    return compat.any(utils.IsBelowDir(p, path) for p in _forbidden)

  normalized = [os.path.normpath(p) for p in paths]
  return utils.NiceSort([p for p in normalized if _IsWrong(p)])
153

    
154

    
155
def ComputeWrongFileStoragePaths(_filename=pathutils.FILE_STORAGE_PATHS_FILE):
  """Returns a list of file storage paths whose prefix is considered bad.

  See L{_ComputeWrongFileStoragePaths}.

  """
  configured = _LoadAllowedFileStoragePaths(_filename)
  return _ComputeWrongFileStoragePaths(configured)
162

    
163

    
164
def _CheckFileStoragePath(path, allowed):
  """Checks if a path is in a list of allowed paths for file storage.

  @type path: string
  @param path: Path to check
  @type allowed: list
  @param allowed: List of allowed paths
  @raise errors.FileStoragePathError: If the path is not allowed

  """
  if not os.path.isabs(path):
    raise errors.FileStoragePathError("File storage path must be absolute,"
                                      " got '%s'" % path)

  for i in allowed:
    if not os.path.isabs(i):
      logging.info("Ignoring relative path '%s' for file storage", i)
    elif utils.IsBelowDir(i, path):
      # Path is contained in one of the allowed directories
      return

  raise errors.FileStoragePathError("Path '%s' is not acceptable for file"
                                    " storage" % path)
188

    
189

    
190
def _LoadAllowedFileStoragePaths(filename):
  """Loads file containing allowed file storage paths.

  @rtype: list
  @return: List of allowed paths (can be an empty list)

  """
  try:
    contents = utils.ReadFile(filename)
  except EnvironmentError:
    # A missing or unreadable file simply means nothing is allowed
    return []
  return utils.FilterEmptyLinesAndComments(contents)
203

    
204

    
205
def CheckFileStoragePath(path, _filename=pathutils.FILE_STORAGE_PATHS_FILE):
  """Checks if a path is allowed for file storage.

  @type path: string
  @param path: Path to check
  @raise errors.FileStoragePathError: If the path is not allowed

  """
  allowed = _LoadAllowedFileStoragePaths(_filename)

  # First reject well-known system prefixes, then require the path to
  # be below one of the configured directories
  wrong = _ComputeWrongFileStoragePaths([path])
  if wrong:
    raise errors.FileStoragePathError("Path '%s' uses a forbidden prefix" %
                                      path)

  _CheckFileStoragePath(path, allowed)
220

    
221

    
222
class BlockDev(object):
  """Block device abstract class.

  A block device can be in the following states:
    - not existing on the system, and by `Create()` it goes into:
    - existing but not setup/not active, and by `Assemble()` goes into:
    - active read-write and by `Open()` it goes into
    - online (=used, or ready for use)

  A device can also be online but read-only, however we are not using
  the readonly state (LV has it, if needed in the future) and we are
  usually looking at this like at a stack, so it's easier to
  conceptualise the transition from not-existing to online and back
  like a linear one.

  The many different states of the device are due to the fact that we
  need to cover many device types:
    - logical volumes are created, lvchange -a y $lv, and used
    - drbd devices are attached to a local disk/remote peer and made primary

  A block device is identified by three items:
    - the /dev path of the device (dynamic)
    - a unique ID of the device (static)
    - its major/minor pair (dynamic)

  Not all devices implement both the first two as distinct items. LVM
  logical volumes have their unique ID (the pair volume group, logical
  volume name) in a 1-to-1 relation to the dev path. For DRBD devices,
  the /dev path is again dynamic and the unique id is the pair (host1,
  dev1), (host2, dev2).

  You can get to a device in two ways:
    - creating the (real) device, which returns you
      an attached instance (lvcreate)
    - attaching of a python instance to an existing (real) device

  The second point, the attachment to a device, is different
  depending on whether the device is assembled or not. At init() time,
  we search for a device with the same unique_id as us. If found,
  good. It also means that the device is already assembled. If not,
  after assembly we'll have our correct major/minor.

  """
  def __init__(self, unique_id, children, size, params):
    # Child block devices (may be empty, e.g. for plain LVs)
    self._children = children
    # /dev path; filled in by subclasses on Attach()/Assemble()
    self.dev_path = None
    self.unique_id = unique_id
    # major/minor are only known once the device is attached
    self.major = None
    self.minor = None
    self.attached = False
    # Size in mebibytes, as used throughout this module
    self.size = size
    # LD-level disk parameters (a dictionary)
    self.params = params

  def Assemble(self):
    """Assemble the device from its components.

    Implementations of this method by child classes must ensure that:
      - after the device has been assembled, it knows its major/minor
        numbers; this allows other devices (usually parents) to probe
        correctly for their children
      - calling this method on an existing, in-use device is safe
      - if the device is already configured (and in an OK state),
        this method is idempotent

    """
    pass

  def Attach(self):
    """Find a device which matches our config and attach to it.

    """
    raise NotImplementedError

  def Close(self):
    """Notifies that the device will no longer be used for I/O.

    """
    raise NotImplementedError

  @classmethod
  def Create(cls, unique_id, children, size, params, excl_stor):
    """Create the device.

    If the device cannot be created, it will return None
    instead. Error messages go to the logging system.

    Note that for some devices, the unique_id is used, and for other,
    the children. The idea is that these two, taken together, are
    enough for both creation and assembly (later).

    """
    raise NotImplementedError

  def Remove(self):
    """Remove this device.

    This makes sense only for some of the device types: LV and file
    storage. Also note that if the device can't attach, the removal
    can't be completed.

    """
    raise NotImplementedError

  def Rename(self, new_id):
    """Rename this device.

    This may or may not make sense for a given device type.

    """
    raise NotImplementedError

  def Open(self, force=False):
    """Make the device ready for use.

    This makes the device ready for I/O. For now, just the DRBD
    devices need this.

    The force parameter signifies that if the device has any kind of
    --force thing, it should be used, we know what we are doing.

    """
    raise NotImplementedError

  def Shutdown(self):
    """Shut down the device, freeing its children.

    This undoes the `Assemble()` work, except for the child
    assembling; as such, the children on the device are still
    assembled after this call.

    """
    raise NotImplementedError

  def SetSyncParams(self, params):
    """Adjust the synchronization parameters of the mirror.

    In case this is not a mirroring device, this is no-op.

    @param params: dictionary of LD level disk parameters related to the
    synchronization.
    @rtype: list
    @return: a list of error messages, emitted both by the current node and by
    children. An empty list means no errors.

    """
    result = []
    if self._children:
      # Propagate the new parameters recursively, collecting all errors
      for child in self._children:
        result.extend(child.SetSyncParams(params))
    return result

  def PauseResumeSync(self, pause):
    """Pause/Resume the sync of the mirror.

    In case this is not a mirroring device, this is no-op.

    @param pause: Whether to pause or resume

    """
    result = True
    if self._children:
      # A single failing child makes the overall result False
      for child in self._children:
        result = result and child.PauseResumeSync(pause)
    return result

  def GetSyncStatus(self):
    """Returns the sync status of the device.

    If this device is a mirroring device, this function returns the
    status of the mirror.

    If sync_percent is None, it means the device is not syncing.

    If estimated_time is None, it means we can't estimate
    the time needed, otherwise it's the time left in seconds.

    If is_degraded is True, it means the device is missing
    redundancy. This is usually a sign that something went wrong in
    the device setup, if sync_percent is None.

    The ldisk parameter represents the degradation of the local
    data. This is only valid for some devices, the rest will always
    return False (not degraded).

    @rtype: objects.BlockDevStatus

    """
    return objects.BlockDevStatus(dev_path=self.dev_path,
                                  major=self.major,
                                  minor=self.minor,
                                  sync_percent=None,
                                  estimated_time=None,
                                  is_degraded=False,
                                  ldisk_status=constants.LDS_OKAY)

  def CombinedSyncStatus(self):
    """Calculate the mirror status recursively for our children.

    The return value is the same as for `GetSyncStatus()` except the
    minimum percent and maximum time are calculated across our
    children.

    @rtype: objects.BlockDevStatus

    """
    status = self.GetSyncStatus()

    min_percent = status.sync_percent
    max_time = status.estimated_time
    is_degraded = status.is_degraded
    ldisk_status = status.ldisk_status

    if self._children:
      for child in self._children:
        child_status = child.GetSyncStatus()

        # Keep the worst (lowest) completion percentage
        if min_percent is None:
          min_percent = child_status.sync_percent
        elif child_status.sync_percent is not None:
          min_percent = min(min_percent, child_status.sync_percent)

        # Keep the worst (highest) time estimate
        if max_time is None:
          max_time = child_status.estimated_time
        elif child_status.estimated_time is not None:
          max_time = max(max_time, child_status.estimated_time)

        is_degraded = is_degraded or child_status.is_degraded

        # Keep the worst (highest) local-disk status
        if ldisk_status is None:
          ldisk_status = child_status.ldisk_status
        elif child_status.ldisk_status is not None:
          ldisk_status = max(ldisk_status, child_status.ldisk_status)

    return objects.BlockDevStatus(dev_path=self.dev_path,
                                  major=self.major,
                                  minor=self.minor,
                                  sync_percent=min_percent,
                                  estimated_time=max_time,
                                  is_degraded=is_degraded,
                                  ldisk_status=ldisk_status)

  def SetInfo(self, text):
    """Update metadata with info text.

    Only supported for some device types.

    """
    for child in self._children:
      child.SetInfo(text)

  def Grow(self, amount, dryrun, backingstore):
    """Grow the block device.

    @type amount: integer
    @param amount: the amount (in mebibytes) to grow with
    @type dryrun: boolean
    @param dryrun: whether to execute the operation in simulation mode
        only, without actually increasing the size
    @param backingstore: whether to execute the operation on backing storage
        only, or on "logical" storage only; e.g. DRBD is logical storage,
        whereas LVM, file, RBD are backing storage

    """
    raise NotImplementedError

  def GetActualSize(self):
    """Return the actual disk size.

    @note: the device needs to be active when this is called

    """
    assert self.attached, "BlockDevice not attached in GetActualSize()"
    result = utils.RunCmd(["blockdev", "--getsize64", self.dev_path])
    if result.failed:
      _ThrowError("blockdev failed (%s): %s",
                  result.fail_reason, result.output)
    try:
      # blockdev reports bytes
      sz = int(result.output.strip())
    except (ValueError, TypeError), err:
      _ThrowError("Failed to parse blockdev output: %s", str(err))
    return sz

  def __repr__(self):
    return ("<%s: unique_id: %s, children: %s, %s:%s, %s>" %
            (self.__class__, self.unique_id, self._children,
             self.major, self.minor, self.dev_path))
508

    
509

    
510
class LogicalVolume(BlockDev):
  """Logical Volume block device.

  """
  # Characters allowed in VG and LV names (per lvm(8))
  _VALID_NAME_RE = re.compile("^[a-zA-Z0-9+_.-]*$")
  # Names reserved by LVM itself
  _INVALID_NAMES = compat.UniqueFrozenset([".", "..", "snapshot", "pvmove"])
  # Substrings reserved for LVM-internal volumes (mirror logs/images)
  _INVALID_SUBSTRINGS = compat.UniqueFrozenset(["_mlog", "_mimage"])
517

    
518
  def __init__(self, unique_id, children, size, params):
    """Attaches to a LV device.

    The unique_id is a tuple (vg_name, lv_name)

    """
    super(LogicalVolume, self).__init__(unique_id, children, size, params)
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
      raise ValueError("Invalid configuration data %s" % str(unique_id))
    (self._vg_name, self._lv_name) = unique_id
    for name in (self._vg_name, self._lv_name):
      self._ValidateName(name)
    self.dev_path = utils.PathJoin("/dev", self._vg_name, self._lv_name)
    # Assume degraded until Attach() proves otherwise
    self._degraded = True
    self.major = self.minor = self.pe_size = self.stripe_count = None
    self.Attach()
534

    
535
  @staticmethod
  def _GetStdPvSize(pvs_info):
    """Return the standard PV size (used with exclusive storage).

    @param pvs_info: list of objects.LvmPvInfo, cannot be empty
    @rtype: float
    @return: size in MiB

    """
    assert len(pvs_info) > 0
    # Derive the usable size from the smallest PV, accounting for the
    # partition margin and reserved space
    smallest = min(pv.size for pv in pvs_info)
    return smallest / (1 + constants.PART_MARGIN + constants.PART_RESERVED)
547

    
548
  @staticmethod
  def _ComputeNumPvs(size, pvs_info):
    """Compute the number of PVs needed for an LV (with exclusive storage).

    @type size: float
    @param size: LV size in MiB
    @param pvs_info: list of objects.LvmPvInfo, cannot be empty
    @rtype: integer
    @return: number of PVs needed
    """
    assert len(pvs_info) > 0
    # Each PV contributes one standard PV size; round up to whole PVs
    needed = float(size) / float(LogicalVolume._GetStdPvSize(pvs_info))
    return int(math.ceil(needed))
561

    
562
  @staticmethod
  def _GetEmptyPvNames(pvs_info, max_pvs=None):
    """Return a list of empty PVs, by name.

    @param pvs_info: list of objects.LvmPvInfo
    @param max_pvs: optional cap on the number of names returned
    @rtype: list

    """
    empty_pvs = [pv for pv in pvs_info if objects.LvmPvInfo.IsEmpty(pv)]
    if max_pvs is not None:
      empty_pvs = empty_pvs[:max_pvs]
    return [pv.name for pv in empty_pvs]
571

    
572
  @classmethod
  def Create(cls, unique_id, children, size, params, excl_stor):
    """Create a new logical volume.

    @param unique_id: tuple (vg_name, lv_name)
    @param size: LV size in MiB
    @param excl_stor: whether exclusive_storage is enabled
    @raise errors.ProgrammerError: if unique_id is malformed
    @raise errors.BlockDeviceError: if the LV cannot be created

    """
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
      raise errors.ProgrammerError("Invalid configuration data %s" %
                                   str(unique_id))
    vg_name, lv_name = unique_id
    cls._ValidateName(vg_name)
    cls._ValidateName(lv_name)
    pvs_info = cls.GetPVInfo([vg_name])
    if not pvs_info:
      if excl_stor:
        msg = "No (empty) PVs found"
      else:
        msg = "Can't compute PV info for vg %s" % vg_name
      _ThrowError(msg)
    # Prefer the PVs with the most free space
    pvs_info.sort(key=(lambda pv: pv.free), reverse=True)

    pvlist = [pv.name for pv in pvs_info]
    if compat.any(":" in v for v in pvlist):
      _ThrowError("Some of your PVs have the invalid character ':' in their"
                  " name, this is not supported - please filter them out"
                  " in lvm.conf using either 'filter' or 'preferred_names'")

    current_pvs = len(pvlist)
    desired_stripes = params[constants.LDP_STRIPES]
    # Can't stripe wider than the number of PVs available
    stripes = min(current_pvs, desired_stripes)

    if excl_stor:
      (err_msgs, _) = utils.LvmExclusiveCheckNodePvs(pvs_info)
      if err_msgs:
        for m in err_msgs:
          logging.warning(m)
      # With exclusive storage only whole empty PVs may be used
      req_pvs = cls._ComputeNumPvs(size, pvs_info)
      pvlist = cls._GetEmptyPvNames(pvs_info, req_pvs)
      current_pvs = len(pvlist)
      if current_pvs < req_pvs:
        _ThrowError("Not enough empty PVs to create a disk of %d MB:"
                    " %d available, %d needed", size, current_pvs, req_pvs)
      assert current_pvs == len(pvlist)
      if stripes > current_pvs:
        # No warning issued for this, as it's no surprise
        stripes = current_pvs

    else:
      if stripes < desired_stripes:
        logging.warning("Could not use %d stripes for VG %s, as only %d PVs are"
                        " available.", desired_stripes, vg_name, current_pvs)
      free_size = sum([pv.free for pv in pvs_info])
      # The size constraint should have been checked from the master before
      # calling the create function.
      if free_size < size:
        _ThrowError("Not enough free space: required %s,"
                    " available %s", size, free_size)

    # If the free space is not well distributed, we won't be able to
    # create an optimally-striped volume; in that case, we want to try
    # with N, N-1, ..., 2, and finally 1 (non-stripped) number of
    # stripes
    cmd = ["lvcreate", "-L%dm" % size, "-n%s" % lv_name]
    for stripes_arg in range(stripes, 0, -1):
      result = utils.RunCmd(cmd + ["-i%d" % stripes_arg] + [vg_name] + pvlist)
      if not result.failed:
        break
    # result from the last attempted stripe count (retry loop above)
    if result.failed:
      _ThrowError("LV create failed (%s): %s",
                  result.fail_reason, result.output)
    return LogicalVolume(unique_id, children, size, params)
642

    
643
  @staticmethod
  def _GetVolumeInfo(lvm_cmd, fields):
    """Query an LVM reporting command and parse its output.

    @param lvm_cmd: Should be one of "pvs", "vgs" or "lvs"
    @param fields: Fields to return
    @return: a list of lists, one per output line, each holding the
        requested field values in order

    """
    if not fields:
      raise errors.ProgrammerError("No fields specified")

    sep = "|"
    cmd = [lvm_cmd, "--noheadings", "--nosuffix", "--units=m", "--unbuffered",
           "--separator=%s" % sep, "-o%s" % ",".join(fields)]

    result = utils.RunCmd(cmd)
    if result.failed:
      raise errors.CommandError("Can't get the volume information: %s - %s" %
                                (result.fail_reason, result.output))

    data = []
    for line in result.stdout.splitlines():
      values = line.strip().split(sep)
      # Every line must yield exactly the requested fields
      if len(values) != len(fields):
        raise errors.CommandError("Can't parse %s output: line '%s'" %
                                  (lvm_cmd, line))
      data.append(values)

    return data
675

    
676
  @classmethod
  def GetPVInfo(cls, vg_names, filter_allocatable=True, include_lvs=False):
    """Get the free space info for PVs in a volume group.

    @param vg_names: list of volume group names, if empty all will be returned
    @param filter_allocatable: whether to skip over unallocatable PVs
    @param include_lvs: whether to include a list of LVs hosted on each PV

    @rtype: list
    @return: list of objects.LvmPvInfo objects; None if the "pvs"
        command fails

    """
    # We request "lv_name" field only if we care about LVs, so we don't get
    # a long list of entries with many duplicates unless we really have to.
    # The duplicate "pv_name" field will be ignored.
    if include_lvs:
      lvfield = "lv_name"
    else:
      lvfield = "pv_name"
    try:
      info = cls._GetVolumeInfo("pvs", ["pv_name", "vg_name", "pv_free",
                                        "pv_attr", "pv_size", lvfield])
    except errors.GenericError, err:
      logging.error("Can't get PV information: %s", err)
      return None

    # When asked for LVs, "pvs" may return multiple entries for the same PV-LV
    # pair. We sort entries by PV name and then LV name, so it's easy to weed
    # out duplicates.
    if include_lvs:
      info.sort(key=(lambda i: (i[0], i[5])))
    data = []
    # Last LvmPvInfo appended; consecutive rows for the same PV are
    # merged into it rather than creating a duplicate entry
    lastpvi = None
    for (pv_name, vg_name, pv_free, pv_attr, pv_size, lv_name) in info:
      # (possibly) skip over pvs which are not allocatable
      if filter_allocatable and pv_attr[0] != "a":
        continue
      # (possibly) skip over pvs which are not in the right volume group(s)
      if vg_names and vg_name not in vg_names:
        continue
      # Beware of duplicates (check before inserting)
      if lastpvi and lastpvi.name == pv_name:
        if include_lvs and lv_name:
          # Thanks to the sort above, duplicates are adjacent: only
          # append when differing from the last recorded LV
          if not lastpvi.lv_list or lastpvi.lv_list[-1] != lv_name:
            lastpvi.lv_list.append(lv_name)
      else:
        if include_lvs and lv_name:
          lvl = [lv_name]
        else:
          lvl = []
        lastpvi = objects.LvmPvInfo(name=pv_name, vg_name=vg_name,
                                    size=float(pv_size), free=float(pv_free),
                                    attributes=pv_attr, lv_list=lvl)
        data.append(lastpvi)

    return data
732

    
733
  @classmethod
  def _GetExclusiveStorageVgFree(cls, vg_name):
    """Return the free disk space in the given VG, in exclusive storage mode.

    @type vg_name: string
    @param vg_name: VG name
    @rtype: float
    @return: free space in MiB
    """
    pvs_info = cls.GetPVInfo([vg_name])
    if not pvs_info:
      return 0.0
    # With exclusive storage only completely empty PVs count as free,
    # each contributing one standard PV size
    std_size = cls._GetStdPvSize(pvs_info)
    return std_size * len(cls._GetEmptyPvNames(pvs_info))
748

    
749
  @classmethod
750
  def GetVGInfo(cls, vg_names, excl_stor, filter_readonly=True):
751
    """Get the free space info for specific VGs.
752

753
    @param vg_names: list of volume group names, if empty all will be returned
754
    @param excl_stor: whether exclusive_storage is enabled
755
    @param filter_readonly: whether to skip over readonly VGs
756

757
    @rtype: list
758
    @return: list of tuples (free_space, total_size, name) with free_space in
759
             MiB
760

761
    """
762
    try:
763
      info = cls._GetVolumeInfo("vgs", ["vg_name", "vg_free", "vg_attr",
764
                                        "vg_size"])
765
    except errors.GenericError, err:
766
      logging.error("Can't get VG information: %s", err)
767
      return None
768

    
769
    data = []
770
    for vg_name, vg_free, vg_attr, vg_size in info:
771
      # (possibly) skip over vgs which are not writable
772
      if filter_readonly and vg_attr[0] == "r":
773
        continue
774
      # (possibly) skip over vgs which are not in the right volume group(s)
775
      if vg_names and vg_name not in vg_names:
776
        continue
777
      # Exclusive storage needs a different concept of free space
778
      if excl_stor:
779
        es_free = cls._GetExclusiveStorageVgFree(vg_name)
780
        assert es_free <= vg_free
781
        vg_free = es_free
782
      data.append((float(vg_free), float(vg_size), vg_name))
783

    
784
    return data
785

    
786
  @classmethod
  def _ValidateName(cls, name):
    """Validates that a given name is valid as VG or LV name.

    The list of valid characters and restricted names is taken out of
    the lvm(8) manpage, with the simplification that we enforce both
    VG and LV restrictions on the names.

    """
    valid = (cls._VALID_NAME_RE.match(name) and
             name not in cls._INVALID_NAMES and
             not compat.any(substring in name
                            for substring in cls._INVALID_SUBSTRINGS))
    if not valid:
      _ThrowError("Invalid LVM name '%s'", name)
799

    
800
  def Remove(self):
    """Remove this logical volume.

    """
    if not self.minor and not self.Attach():
      # the LV does not exist
      return
    lv_spec = "%s/%s" % (self._vg_name, self._lv_name)
    result = utils.RunCmd(["lvremove", "-f", lv_spec])
    if result.failed:
      _ThrowError("Can't lvremove: %s - %s", result.fail_reason, result.output)
811

    
812
  def Rename(self, new_id):
    """Rename this logical volume.

    @type new_id: tuple
    @param new_id: the new (vg_name, lv_name) pair; the volume group
        must be unchanged, as LVs cannot be moved across VGs
    @raise errors.ProgrammerError: if new_id is malformed or names a
        different volume group
    @raise errors.BlockDeviceError: if lvrename fails

    """
    if not isinstance(new_id, (tuple, list)) or len(new_id) != 2:
      raise errors.ProgrammerError("Invalid new logical id '%s'" % new_id)
    new_vg, new_name = new_id
    if new_vg != self._vg_name:
      # Fixed duplicated word ("to to") in the original message
      raise errors.ProgrammerError("Can't move a logical volume across"
                                   " volume groups (from %s to %s)" %
                                   (self._vg_name, new_vg))
    result = utils.RunCmd(["lvrename", new_vg, self._lv_name, new_name])
    if result.failed:
      _ThrowError("Failed to rename the logical volume: %s", result.output)
    self._lv_name = new_name
    self.dev_path = utils.PathJoin("/dev", self._vg_name, self._lv_name)
828

    
829
  def Attach(self):
    """Attach to an existing LV.

    This method will try to see if an existing and active LV exists
    which matches our name. If so, its major/minor will be
    recorded.

    @rtype: boolean
    @return: True when the LV was found and its properties parsed

    """
    self.attached = False
    result = utils.RunCmd(["lvs", "--noheadings", "--separator=,",
                           "--units=m", "--nosuffix",
                           "-olv_attr,lv_kernel_major,lv_kernel_minor,"
                           "vg_extent_size,stripes", self.dev_path])
    if result.failed:
      logging.error("Can't find LV %s: %s, %s",
                    self.dev_path, result.fail_reason, result.output)
      return False
    # the output can (and will) have multiple lines for multi-segment
    # LVs, as the 'stripes' parameter is a segment one, so we take
    # only the last entry, which is the one we're interested in; note
    # that with LVM2 anyway the 'stripes' value must be constant
    # across segments, so this is a no-op actually
    out = result.stdout.splitlines()
    if not out: # totally empty result? splitlines() returns at least
                # one line for any non-empty string
      logging.error("Can't parse LVS output, no lines? Got '%s'", str(out))
      return False
    out = out[-1].strip().rstrip(",")
    out = out.split(",")
    if len(out) != 5:
      logging.error("Can't parse LVS output, len(%s) != 5", str(out))
      return False

    status, major, minor, pe_size, stripes = out
    if len(status) < 6:
      logging.error("lvs lv_attr is not at least 6 characters (%s)", status)
      return False

    try:
      major = int(major)
      minor = int(minor)
    except (TypeError, ValueError), err:
      # NOTE(review): unlike the parse failures below, this one does not
      # return False, so major/minor keep their unparsed string values —
      # presumably intentional (device may be inactive); confirm
      logging.error("lvs major/minor cannot be parsed: %s", str(err))

    try:
      pe_size = int(float(pe_size))
    except (TypeError, ValueError), err:
      logging.error("Can't parse vg extent size: %s", err)
      return False

    try:
      stripes = int(stripes)
    except (TypeError, ValueError), err:
      logging.error("Can't parse the number of stripes: %s", err)
      return False

    self.major = major
    self.minor = minor
    self.pe_size = pe_size
    self.stripe_count = stripes
    self._degraded = status[0] == "v" # virtual volume, i.e. has no
                                      # backing storage
    self.attached = True
    return True
893

    
894
  def Assemble(self):
    """Assemble the device.

    We always run `lvchange -ay` on the LV to ensure it's active before
    use, as there were cases when xenvg was not active after boot
    (also possibly after disk issues).

    """
    activate = utils.RunCmd(["lvchange", "-ay", self.dev_path])
    if activate.failed:
      _ThrowError("Can't activate lv %s: %s", self.dev_path, activate.output)
  def Shutdown(self):
    """Shutdown the device.

    This is a no-op for the LV device type, as we don't deactivate the
    volumes on shutdown.

    """
    # deliberately empty: logical volumes stay active across shutdowns
    pass
  def GetSyncStatus(self):
    """Returns the sync status of the device.

    If this device is a mirroring device, this function returns the
    status of the mirror.

    For logical volumes, sync_percent and estimated_time are always
    None (no recovery in progress, as we don't handle the mirrored LV
    case). The is_degraded parameter is the inverse of the ldisk
    parameter.

    For the ldisk parameter, we check if the logical volume has the
    'virtual' type, which means it's not backed by existing storage
    anymore (read from it return I/O error). This happens after a
    physical disk failure and subsequent 'vgreduce --removemissing' on
    the volume group.

    The status was already read in Attach, so we just return it.

    @rtype: objects.BlockDevStatus

    """
    # the degraded flag was determined in Attach() from the lv_attr field
    ldisk_status = (constants.LDS_FAULTY if self._degraded
                    else constants.LDS_OKAY)

    return objects.BlockDevStatus(dev_path=self.dev_path,
                                  major=self.major,
                                  minor=self.minor,
                                  sync_percent=None,
                                  estimated_time=None,
                                  is_degraded=self._degraded,
                                  ldisk_status=ldisk_status)
  def Open(self, force=False):
    """Make the device ready for I/O.

    This is a no-op for the LV device type.

    """
    # nothing to do: an active LV is always ready for I/O
    pass
  def Close(self):
    """Notifies that the device will no longer be used for I/O.

    This is a no-op for the LV device type.

    """
    # nothing to do: LVs need no explicit close
    pass
  def Snapshot(self, size):
    """Create a snapshot copy of an lvm block device.

    @type size: int
    @param size: the requested snapshot size, in mebibytes
    @returns: tuple (vg, lv)

    """
    snap_name = self._lv_name + ".snap"

    # drop any stale snapshot carrying the same name, best-effort
    stale = LogicalVolume((self._vg_name, snap_name), None, size, self.params)
    _IgnoreError(stale.Remove)

    # make sure the volume group has room for the snapshot
    vginfo = self.GetVGInfo([self._vg_name], False)
    if not vginfo:
      _ThrowError("Can't compute VG info for vg %s", self._vg_name)
    (avail, _, _) = vginfo[0]
    if avail < size:
      _ThrowError("Not enough free space: required %s,"
                  " available %s", size, avail)

    _CheckResult(utils.RunCmd(["lvcreate", "-L%dm" % size, "-s",
                               "-n%s" % snap_name, self.dev_path]))

    return (self._vg_name, snap_name)
  def _RemoveOldInfo(self):
    """Try to remove old tags from the lv.

    """
    lvs_result = utils.RunCmd(["lvs", "-o", "tags", "--noheadings",
                               "--nosuffix", self.dev_path])
    _CheckResult(lvs_result)

    raw_tags = lvs_result.stdout.strip()
    if not raw_tags:
      return
    # tags are returned comma-separated; delete them one by one
    for tag in raw_tags.split(","):
      _CheckResult(utils.RunCmd(["lvchange", "--deltag",
                                 tag.strip(), self.dev_path]))
  def SetInfo(self, text):
    """Update metadata with info text.

    The text is stored as an LVM tag on the volume, after being
    sanitized to the characters and length LVM accepts.

    """
    BlockDev.SetInfo(self, text)

    self._RemoveOldInfo()

    # Replace invalid characters and trim to LVM's 128-character tag limit
    tag = re.sub("^[^A-Za-z0-9_+.]", "_", text)
    tag = re.sub("[^-A-Za-z0-9_+.]", "_", tag)
    tag = tag[:128]

    _CheckResult(utils.RunCmd(["lvchange", "--addtag", tag, self.dev_path]))
  def Grow(self, amount, dryrun, backingstore):
    """Grow the logical volume.

    @type amount: int
    @param amount: the requested growth, in mebibytes
    @type dryrun: boolean
    @param dryrun: if True, only check that the growth is possible
    @type backingstore: boolean
    @param backingstore: if False, there is nothing to do for an LV

    """
    if not backingstore:
      return
    if self.pe_size is None or self.stripe_count is None:
      if not self.Attach():
        _ThrowError("Can't attach to LV during Grow()")
    # round the growth up to a multiple of the full stripe size, so
    # that every stripe is extended equally
    full_stripe_size = self.pe_size * self.stripe_count
    remainder = amount % full_stripe_size
    if remainder:
      amount += full_stripe_size - remainder
    base_cmd = ["lvextend", "-L", "+%dm" % amount]
    if dryrun:
      base_cmd.append("--test")
    # we try multiple algorithms since the 'best' ones might not have
    # space available in the right place, but later ones might (since
    # they have less constraints); also note that only recent LVM
    # supports 'cling'
    for policy in ("contiguous", "cling", "normal"):
      result = utils.RunCmd(base_cmd + ["--alloc", policy, self.dev_path])
      if not result.failed:
        return
    _ThrowError("Can't grow LV %s: %s", self.dev_path, result.output)
class DRBD8Status(object):
  """A DRBD status representation class.

  Parses one /proc/drbd status line and exposes the connection state,
  local/remote roles, local/remote disk states and (when resyncing)
  the sync percentage and estimated time.

  Note that this doesn't support unconfigured devices (cs:Unconfigured).

  """
  UNCONF_RE = re.compile(r"\s*[0-9]+:\s*cs:Unconfigured$")
  # FIX: the continuation string literals below were not raw strings,
  # producing invalid escape sequences such as "\s" (deprecated and a
  # future error in Python); the compiled patterns are byte-identical
  LINE_RE = re.compile(r"\s*[0-9]+:\s*cs:(\S+)\s+(?:st|ro):([^/]+)/(\S+)"
                       r"\s+ds:([^/]+)/(\S+)\s+.*$")
  SYNC_RE = re.compile(r"^.*\ssync'ed:\s*([0-9.]+)%.*"
                       # Due to a bug in drbd in the kernel, introduced in
                       # commit 4b0715f096 (still unfixed as of 2011-08-22)
                       r"(?:\s|M)"
                       r"finish: ([0-9]+):([0-9]+):([0-9]+)\s.*$")

  # connection state (cs:) values
  CS_UNCONFIGURED = "Unconfigured"
  CS_STANDALONE = "StandAlone"
  CS_WFCONNECTION = "WFConnection"
  CS_WFREPORTPARAMS = "WFReportParams"
  CS_CONNECTED = "Connected"
  CS_STARTINGSYNCS = "StartingSyncS"
  CS_STARTINGSYNCT = "StartingSyncT"
  CS_WFBITMAPS = "WFBitMapS"
  CS_WFBITMAPT = "WFBitMapT"
  CS_WFSYNCUUID = "WFSyncUUID"
  CS_SYNCSOURCE = "SyncSource"
  CS_SYNCTARGET = "SyncTarget"
  CS_PAUSEDSYNCS = "PausedSyncS"
  CS_PAUSEDSYNCT = "PausedSyncT"
  # connection states that mean a resync is in progress (or starting)
  CSET_SYNC = compat.UniqueFrozenset([
    CS_WFREPORTPARAMS,
    CS_STARTINGSYNCS,
    CS_STARTINGSYNCT,
    CS_WFBITMAPS,
    CS_WFBITMAPT,
    CS_WFSYNCUUID,
    CS_SYNCSOURCE,
    CS_SYNCTARGET,
    CS_PAUSEDSYNCS,
    CS_PAUSEDSYNCT,
    ])

  # disk state (ds:) values
  DS_DISKLESS = "Diskless"
  DS_ATTACHING = "Attaching" # transient state
  DS_FAILED = "Failed" # transient state, next: diskless
  DS_NEGOTIATING = "Negotiating" # transient state
  DS_INCONSISTENT = "Inconsistent" # while syncing or after creation
  DS_OUTDATED = "Outdated"
  DS_DUNKNOWN = "DUnknown" # shown for peer disk when not connected
  DS_CONSISTENT = "Consistent"
  DS_UPTODATE = "UpToDate" # normal state

  # role (ro:) values
  RO_PRIMARY = "Primary"
  RO_SECONDARY = "Secondary"
  RO_UNKNOWN = "Unknown"

  def __init__(self, procline):
    """Parse a /proc/drbd status line.

    @type procline: string
    @param procline: one (joined) status line from /proc/drbd
    @raise errors.BlockDeviceError: if the line cannot be parsed

    """
    u = self.UNCONF_RE.match(procline)
    if u:
      self.cstatus = self.CS_UNCONFIGURED
      self.lrole = self.rrole = self.ldisk = self.rdisk = None
    else:
      m = self.LINE_RE.match(procline)
      if not m:
        raise errors.BlockDeviceError("Can't parse input data '%s'" % procline)
      self.cstatus = m.group(1)
      self.lrole = m.group(2)
      self.rrole = m.group(3)
      self.ldisk = m.group(4)
      self.rdisk = m.group(5)

    # end reading of data from the LINE_RE or UNCONF_RE

    # convenience booleans derived from the raw states
    self.is_standalone = self.cstatus == self.CS_STANDALONE
    self.is_wfconn = self.cstatus == self.CS_WFCONNECTION
    self.is_connected = self.cstatus == self.CS_CONNECTED
    self.is_primary = self.lrole == self.RO_PRIMARY
    self.is_secondary = self.lrole == self.RO_SECONDARY
    self.peer_primary = self.rrole == self.RO_PRIMARY
    self.peer_secondary = self.rrole == self.RO_SECONDARY
    self.both_primary = self.is_primary and self.peer_primary
    self.both_secondary = self.is_secondary and self.peer_secondary

    self.is_diskless = self.ldisk == self.DS_DISKLESS
    self.is_disk_uptodate = self.ldisk == self.DS_UPTODATE

    self.is_in_resync = self.cstatus in self.CSET_SYNC
    self.is_in_use = self.cstatus != self.CS_UNCONFIGURED

    m = self.SYNC_RE.match(procline)
    if m:
      self.sync_percent = float(m.group(1))
      hours = int(m.group(2))
      minutes = int(m.group(3))
      seconds = int(m.group(4))
      self.est_time = hours * 3600 + minutes * 60 + seconds
    else:
      # we have (in this if branch) no percent information, but if
      # we're resyncing we need to 'fake' a sync percent information,
      # as this is how cmdlib determines if it makes sense to wait for
      # resyncing or not
      if self.is_in_resync:
        self.sync_percent = 0
      else:
        self.sync_percent = None
      self.est_time = None
class BaseDRBD(BlockDev): # pylint: disable=W0223
  """Base DRBD class.

  This class contains a few bits of common functionality between the
  0.7 and 8.x versions of DRBD.

  """
  # parses the /proc/drbd version header, e.g.
  # "version: 8.3.11 (api:88/proto:86-96)"
  _VERSION_RE = re.compile(r"^version: (\d+)\.(\d+)\.(\d+)(?:\.\d+)?"
                           r" \(api:(\d+)/proto:(\d+)(?:-(\d+))?\)")
  # matches any configured minor status line: "<minor>: cs:<state> ..."
  _VALID_LINE_RE = re.compile("^ *([0-9]+): cs:([^ ]+).*$")
  # matches a minor that exists but is not configured
  _UNUSED_LINE_RE = re.compile("^ *([0-9]+): cs:Unconfigured$")

  # reserved block-device major number for DRBD
  _DRBD_MAJOR = 147
  # connection state strings as they appear in /proc/drbd
  _ST_UNCONFIGURED = "Unconfigured"
  _ST_WFCONNECTION = "WFConnection"
  _ST_CONNECTED = "Connected"

  _STATUS_FILE = constants.DRBD_STATUS_FILE
  _USERMODE_HELPER_FILE = "/sys/module/drbd/parameters/usermode_helper"

  @staticmethod
  def _GetProcData(filename=_STATUS_FILE):
    """Return data from /proc/drbd.

    @type filename: string
    @param filename: the path to read (defaults to the DRBD status file)
    @rtype: list of strings
    @return: the file contents split into lines
    @raise errors.BlockDeviceError: if the file is missing (module not
        loaded), unreadable or empty (via _ThrowError)

    """
    try:
      data = utils.ReadFile(filename).splitlines()
    except EnvironmentError, err:
      if err.errno == errno.ENOENT:
        # a missing status file usually means the kernel module isn't loaded
        _ThrowError("The file %s cannot be opened, check if the module"
                    " is loaded (%s)", filename, str(err))
      else:
        _ThrowError("Can't read the DRBD proc file %s: %s", filename, str(err))
    if not data:
      _ThrowError("Can't read any data from %s", filename)
    return data

  @classmethod
  def _MassageProcData(cls, data):
    """Transform the output of _GetProdData into a nicer form.

    @type data: list of strings
    @param data: the lines of /proc/drbd, as returned by _GetProcData
    @return: a dictionary of minor: joined lines from /proc/drbd
        for that minor

    """
    results = {}
    old_minor = old_line = None
    for line in data:
      if not line: # completely empty lines, as can be returned by drbd8.0+
        continue
      lresult = cls._VALID_LINE_RE.match(line)
      if lresult is not None:
        # a new minor starts here: flush the one accumulated so far
        if old_minor is not None:
          results[old_minor] = old_line
        old_minor = int(lresult.group(1))
        old_line = line
      else:
        # continuation line belonging to the current minor
        if old_minor is not None:
          old_line += " " + line.strip()
    # add last line
    if old_minor is not None:
      results[old_minor] = old_line
    return results

  @classmethod
  def _GetVersion(cls, proc_data):
    """Return the DRBD version.

    This will return a dict with keys:
      - k_major
      - k_minor
      - k_point
      - api
      - proto
      - proto2 (only on drbd > 8.2.X)

    @type proc_data: list of strings
    @param proc_data: the lines of /proc/drbd; the version is expected
        on the first line
    @raise errors.BlockDeviceError: if the version line cannot be parsed

    """
    first_line = proc_data[0].strip()
    version = cls._VERSION_RE.match(first_line)
    if not version:
      raise errors.BlockDeviceError("Can't parse DRBD version from '%s'" %
                                    first_line)

    values = version.groups()
    retval = {
      "k_major": int(values[0]),
      "k_minor": int(values[1]),
      "k_point": int(values[2]),
      "api": int(values[3]),
      "proto": int(values[4]),
      }
    # the second protocol number is only present on newer DRBD versions
    if values[5] is not None:
      retval["proto2"] = values[5]

    return retval

  @staticmethod
  def GetUsermodeHelper(filename=_USERMODE_HELPER_FILE):
    """Returns DRBD usermode_helper currently set.

    @type filename: string
    @param filename: the sysfs file to read the helper name from
    @rtype: string
    @raise errors.BlockDeviceError: if the file is missing, unreadable
        or empty (via _ThrowError)

    """
    try:
      helper = utils.ReadFile(filename).splitlines()[0]
    except EnvironmentError, err:
      if err.errno == errno.ENOENT:
        _ThrowError("The file %s cannot be opened, check if the module"
                    " is loaded (%s)", filename, str(err))
      else:
        _ThrowError("Can't read DRBD helper file %s: %s", filename, str(err))
    if not helper:
      _ThrowError("Can't read any data from %s", filename)
    return helper

  @staticmethod
  def _DevPath(minor):
    """Return the path to a drbd device for a given minor.

    @type minor: int
    @rtype: string

    """
    return "/dev/drbd%d" % minor

  @classmethod
  def GetUsedDevs(cls):
    """Compute the list of used DRBD devices.

    @rtype: dict
    @return: a mapping of minor to (connection state, raw status line)
        for every configured minor

    """
    data = cls._GetProcData()

    used_devs = {}
    for line in data:
      match = cls._VALID_LINE_RE.match(line)
      if not match:
        continue
      minor = int(match.group(1))
      state = match.group(2)
      # unconfigured minors exist in /proc/drbd but are not "used"
      if state == cls._ST_UNCONFIGURED:
        continue
      used_devs[minor] = state, line

    return used_devs

  def _SetFromMinor(self, minor):
    """Set our parameters based on the given minor.

    This sets our minor variable and our dev_path.

    @type minor: int or None
    @param minor: the minor to record, or None to mark the device as
        not attached

    """
    if minor is None:
      self.minor = self.dev_path = None
      self.attached = False
    else:
      self.minor = minor
      self.dev_path = self._DevPath(minor)
      self.attached = True

  @staticmethod
  def _CheckMetaSize(meta_device):
    """Check if the given meta device looks like a valid one.

    This currently only checks the size, which must be around
    128MiB.

    @type meta_device: string
    @param meta_device: the block device path to check
    @raise errors.BlockDeviceError: if the size cannot be read or is
        out of the accepted range (via _ThrowError)

    """
    result = utils.RunCmd(["blockdev", "--getsize", meta_device])
    if result.failed:
      _ThrowError("Failed to get device size: %s - %s",
                  result.fail_reason, result.output)
    try:
      sectors = int(result.stdout)
    except (TypeError, ValueError):
      _ThrowError("Invalid output from blockdev: '%s'", result.stdout)
    # blockdev --getsize reports 512-byte sectors
    num_bytes = sectors * 512
    if num_bytes < 128 * 1024 * 1024: # less than 128MiB
      _ThrowError("Meta device too small (%.2fMib)", (num_bytes / 1024 / 1024))
    # the maximum *valid* size of the meta device when living on top
    # of LVM is hard to compute: it depends on the number of stripes
    # and the PE size; e.g. a 2-stripe, 64MB PE will result in a 128MB
    # (normal size), but an eight-stripe 128MB PE will result in a 1GB
    # size meta device; as such, we restrict it to 1GB (a little bit
    # too generous, but making assumptions about PE size is hard)
    if num_bytes > 1024 * 1024 * 1024:
      _ThrowError("Meta device too big (%.2fMiB)", (num_bytes / 1024 / 1024))

  def Rename(self, new_id):
    """Rename a device.

    This is not supported for drbd devices.

    @raise errors.ProgrammerError: always

    """
    raise errors.ProgrammerError("Can't rename a drbd device")
class DRBD8(BaseDRBD):
1349
  """DRBD v8.x block device.
1350

1351
  This implements the local host part of the DRBD device, i.e. it
1352
  doesn't do anything to the supposed peer. If you need a fully
1353
  connected DRBD pair, you need to use this class on both hosts.
1354

1355
  The unique_id for the drbd device is a (local_ip, local_port,
1356
  remote_ip, remote_port, local_minor, secret) tuple, and it must have
1357
  two children: the data device and the meta_device. The meta device
1358
  is checked for valid size and is zeroed on create.
1359

1360
  """
1361
  _MAX_MINORS = 255
1362
  _PARSE_SHOW = None
1363

    
1364
  # timeout constants
1365
  _NET_RECONFIG_TIMEOUT = 60
1366

    
1367
  # command line options for barriers
1368
  _DISABLE_DISK_OPTION = "--no-disk-barrier"  # -a
1369
  _DISABLE_DRAIN_OPTION = "--no-disk-drain"   # -D
1370
  _DISABLE_FLUSH_OPTION = "--no-disk-flushes" # -i
1371
  _DISABLE_META_FLUSH_OPTION = "--no-md-flushes"  # -m
1372

    
1373
  def __init__(self, unique_id, children, size, params):
    """Initialize a DRBD8 device.

    @param unique_id: (local_ip, local_port, remote_ip, remote_port,
        local_minor, secret) tuple identifying this device
    @param children: either empty or [data_device, meta_device]
    @param size: device size in mebibytes
    @param params: disk parameters dict
    @raise ValueError: on malformed unique_id or children

    """
    # a None child means the backing devices are gone; treat as no children
    if children and children.count(None) > 0:
      children = []
    if len(children) not in (0, 2):
      raise ValueError("Invalid configuration data %s" % str(children))
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 6:
      raise ValueError("Invalid configuration data %s" % str(unique_id))
    (self._lhost, self._lport,
     self._rhost, self._rport,
     self._aminor, self._secret) = unique_id
    if children:
      # children[1] is the meta device; if it cannot be read, drop both
      # children and behave as if we had none
      if not _CanReadDevice(children[1].dev_path):
        logging.info("drbd%s: Ignoring unreadable meta device", self._aminor)
        children = []
    super(DRBD8, self).__init__(unique_id, children, size, params)
    self.major = self._DRBD_MAJOR
    # only DRBD 8.x kernels are supported by this class
    version = self._GetVersion(self._GetProcData())
    if version["k_major"] != 8:
      _ThrowError("Mismatch in DRBD kernel version and requested ganeti"
                  " usage: kernel is %s.%s, ganeti wants 8.x",
                  version["k_major"], version["k_minor"])

    # local and remote endpoints must differ
    if (self._lhost is not None and self._lhost == self._rhost and
        self._lport == self._rport):
      raise ValueError("Invalid configuration data, same local/remote %s" %
                       (unique_id,))
    self.Attach()
  @classmethod
  def _InitMeta(cls, minor, dev_path):
    """Initialize a meta device.

    This will not work if the given minor is in use.

    @type minor: int
    @param minor: the DRBD minor whose metadata is being created
    @type dev_path: string
    @param dev_path: the backing device for the metadata

    """
    # Zero the metadata first, in order to make sure drbdmeta doesn't
    # try to auto-detect existing filesystems or similar (see
    # http://code.google.com/p/ganeti/issues/detail?id=182); we only
    # care about the first 128MB of data in the device, even though it
    # can be bigger
    wipe_result = utils.RunCmd([constants.DD_CMD,
                                "if=/dev/zero", "of=%s" % dev_path,
                                "bs=1048576", "count=128", "oflag=direct"])
    if wipe_result.failed:
      _ThrowError("Can't wipe the meta device: %s", wipe_result.output)

    md_result = utils.RunCmd(["drbdmeta", "--force", cls._DevPath(minor),
                              "v08", dev_path, "0", "create-md"])
    if md_result.failed:
      _ThrowError("Can't initialize meta device: %s", md_result.output)
  @classmethod
  def _FindUnusedMinor(cls):
    """Find an unused DRBD device.

    This is specific to 8.x as the minors are allocated dynamically,
    so non-existing numbers up to a max minor count are actually free.

    @rtype: int
    @return: a free minor number
    @raise errors.BlockDeviceError: if no minor is free

    """
    data = cls._GetProcData()

    highest = None
    for line in data:
      match = cls._UNUSED_LINE_RE.match(line)
      if match:
        # an allocated-but-unconfigured minor can be reused directly
        return int(match.group(1))
      match = cls._VALID_LINE_RE.match(line)
      if match:
        minor = int(match.group(1))
        # FIX: was "highest = max(highest, minor)", which relies on
        # Python 2's arbitrary None-vs-int ordering; compare explicitly
        if highest is None or minor > highest:
          highest = minor
    if highest is None: # there are no minors in use at all
      return 0
    if highest >= cls._MAX_MINORS:
      logging.error("Error: no free drbd minors!")
      raise errors.BlockDeviceError("Can't find a free DRBD minor")
    return highest + 1
  @classmethod
  def _GetShowParser(cls):
    """Return a parser for `drbd show` output.

    This will either create or return an already-created parser for the
    output of the command `drbd show`.

    @return: a pyparsing grammar object; the result is cached in
        cls._PARSE_SHOW after the first call

    """
    if cls._PARSE_SHOW is not None:
      return cls._PARSE_SHOW

    # pyparsing setup
    lbrace = pyp.Literal("{").suppress()
    rbrace = pyp.Literal("}").suppress()
    lbracket = pyp.Literal("[").suppress()
    rbracket = pyp.Literal("]").suppress()
    semi = pyp.Literal(";").suppress()
    colon = pyp.Literal(":").suppress()
    # this also converts the value to an int
    number = pyp.Word(pyp.nums).setParseAction(lambda s, l, t: int(t[0]))

    comment = pyp.Literal("#") + pyp.Optional(pyp.restOfLine)
    defa = pyp.Literal("_is_default").suppress()
    dbl_quote = pyp.Literal('"').suppress()

    keyword = pyp.Word(pyp.alphanums + "-")

    # value types
    value = pyp.Word(pyp.alphanums + "_-/.:")
    quoted = dbl_quote + pyp.CharsNotIn('"') + dbl_quote
    # addresses: optional family keyword, host part, colon, port number
    ipv4_addr = (pyp.Optional(pyp.Literal("ipv4")).suppress() +
                 pyp.Word(pyp.nums + ".") + colon + number)
    ipv6_addr = (pyp.Optional(pyp.Literal("ipv6")).suppress() +
                 pyp.Optional(lbracket) + pyp.Word(pyp.hexnums + ":") +
                 pyp.Optional(rbracket) + colon + number)
    # meta device, extended syntax
    meta_value = ((value ^ quoted) + lbracket + number + rbracket)
    # device name, extended syntax
    device_value = pyp.Literal("minor").suppress() + number

    # a statement
    stmt = (~rbrace + keyword + ~lbrace +
            pyp.Optional(ipv4_addr ^ ipv6_addr ^ value ^ quoted ^ meta_value ^
                         device_value) +
            pyp.Optional(defa) + semi +
            pyp.Optional(pyp.restOfLine).suppress())

    # an entire section
    section_name = pyp.Word(pyp.alphas + "_")
    section = section_name + lbrace + pyp.ZeroOrMore(pyp.Group(stmt)) + rbrace

    bnf = pyp.ZeroOrMore(pyp.Group(section ^ stmt))
    bnf.ignore(comment)

    # cache the grammar so subsequent calls are cheap
    cls._PARSE_SHOW = bnf

    return bnf
  @classmethod
  def _GetShowData(cls, minor):
    """Return the `drbdsetup show` data for a minor.

    @type minor: int
    @param minor: the minor to query
    @return: the raw stdout of `drbdsetup ... show`, or None on failure

    """
    result = utils.RunCmd(["drbdsetup", cls._DevPath(minor), "show"])
    if not result.failed:
      return result.stdout
    logging.error("Can't display the drbd config: %s - %s",
                  result.fail_reason, result.output)
    return None
  @classmethod
  def _GetDevInfo(cls, out):
    """Parse details about a given DRBD minor.

    This return, if available, the local backing device (as a path)
    and the local and remote (ip, port) information from a string
    containing the output of the `drbdsetup show` command as returned
    by _GetShowData.

    @type out: string
    @param out: the `drbdsetup show` output (may be empty/None)
    @rtype: dict
    @return: a dict possibly containing the keys "local_dev",
        "meta_dev", "meta_index", "local_addr" and "remote_addr";
        empty if out is empty
    @raise errors.BlockDeviceError: if the output cannot be parsed
        (via _ThrowError)

    """
    data = {}
    if not out:
      return data

    bnf = cls._GetShowParser()
    # run pyparse

    try:
      results = bnf.parseString(out)
    except pyp.ParseException, err:
      _ThrowError("Can't parse drbdsetup show output: %s", str(err))

    # and massage the results into our desired format
    for section in results:
      sname = section[0]
      if sname == "_this_host":
        # local side: backing disk, meta device/index and address
        for lst in section[1:]:
          if lst[0] == "disk":
            data["local_dev"] = lst[1]
          elif lst[0] == "meta-disk":
            data["meta_dev"] = lst[1]
            data["meta_index"] = lst[2]
          elif lst[0] == "address":
            data["local_addr"] = tuple(lst[1:])
      elif sname == "_remote_host":
        # remote side: only the address is of interest
        for lst in section[1:]:
          if lst[0] == "address":
            data["remote_addr"] = tuple(lst[1:])
    return data
  def _MatchesLocal(self, info):
    """Test if our local config matches with an existing device.

    The parameter should be as returned from `_GetDevInfo()`. This
    method tests if our local backing device is the same as the one in
    the info parameter, in effect testing if we look like the given
    device.

    @rtype: boolean

    """
    if self._children:
      backend, meta = self._children
    else:
      backend = meta = None

    # the data device must match (or both be absent)
    if backend is not None:
      disk_ok = "local_dev" in info and info["local_dev"] == backend.dev_path
    else:
      disk_ok = "local_dev" not in info

    # likewise for the meta device; we always use meta index 0
    if meta is not None:
      meta_ok = ("meta_dev" in info and
                 info["meta_dev"] == meta.dev_path and
                 "meta_index" in info and
                 info["meta_index"] == 0)
    else:
      meta_ok = ("meta_dev" not in info and
                 "meta_index" not in info)
    return disk_ok and meta_ok
  def _MatchesNet(self, info):
    """Test if our network config matches with an existing device.

    The parameter should be as returned from `_GetDevInfo()`. This
    method tests if our network configuration is the same as the one
    in the info parameter, in effect testing if we look like the given
    device.

    @rtype: boolean

    """
    # fully disconnected on both sides: a trivial match
    if (self._lhost is None and "local_addr" not in info and
        self._rhost is None and "remote_addr" not in info):
      return True

    if self._lhost is None:
      return False

    if "local_addr" not in info or "remote_addr" not in info:
      return False

    return (info["local_addr"] == (self._lhost, self._lport) and
            info["remote_addr"] == (self._rhost, self._rport))
  def _AssembleLocal(self, minor, backend, meta, size):
    """Configure the local part of a DRBD device.

    @type minor: int
    @param minor: the DRBD minor to configure
    @type backend: string
    @param backend: path of the data device
    @type meta: string
    @param meta: path of the meta device
    @type size: int
    @param size: if non-zero, explicit device size in mebibytes
    @raise errors.BlockDeviceError: if attaching the disk fails
        (via _ThrowError)

    """
    # "-e detach" selects the on-I/O-error policy; "--create-device"
    # makes drbdsetup create the node if it doesn't exist yet
    args = ["drbdsetup", self._DevPath(minor), "disk",
            backend, meta, "0",
            "-e", "detach",
            "--create-device"]
    if size:
      args.extend(["-d", "%sm" % size])

    version = self._GetVersion(self._GetProcData())
    vmaj = version["k_major"]
    vmin = version["k_minor"]
    vrel = version["k_point"]

    # barrier options depend on the running DRBD version
    barrier_args = \
      self._ComputeDiskBarrierArgs(vmaj, vmin, vrel,
                                   self.params[constants.LDP_BARRIERS],
                                   self.params[constants.LDP_NO_META_FLUSH])
    args.extend(barrier_args)

    # user-supplied extra options for the disk section, if any
    if self.params[constants.LDP_DISK_CUSTOM]:
      args.extend(shlex.split(self.params[constants.LDP_DISK_CUSTOM]))

    result = utils.RunCmd(args)
    if result.failed:
      _ThrowError("drbd%d: can't attach local disk: %s", minor, result.output)
  @classmethod
  def _ComputeDiskBarrierArgs(cls, vmaj, vmin, vrel, disabled_barriers,
                              disable_meta_flush):
    """Compute the DRBD command line parameters for disk barriers

    Returns a list of the disk barrier parameters as requested via the
    disabled_barriers and disable_meta_flush arguments, and according to the
    supported ones in the DRBD version vmaj.vmin.vrel

    If the desired option is unsupported, raises errors.BlockDeviceError.

    @type vmaj: int
    @type vmin: int
    @type vrel: int
    @param disabled_barriers: iterable of barrier kinds to disable
    @type disable_meta_flush: boolean
    @rtype: list of strings

    """
    disabled_barriers_set = frozenset(disabled_barriers)
    if disabled_barriers_set not in constants.DRBD_VALID_BARRIER_OPT:
      raise errors.BlockDeviceError("%s is not a valid option set for DRBD"
                                    " barriers" % disabled_barriers)

    args = []

    # The following code assumes DRBD 8.x, with x < 4 and x != 1 (DRBD 8.1.x
    # does not exist)
    # FIX: the original check "not vmaj == 8 and vmin in (0, 2, 3)" parsed
    # as "(vmaj != 8) and (vmin in ...)" due to operator precedence and so
    # never enforced the intended constraint; reject everything that is not
    # exactly 8.0/8.2/8.3
    if vmaj != 8 or vmin not in (0, 2, 3):
      raise errors.BlockDeviceError("Unsupported DRBD version: %d.%d.%d" %
                                    (vmaj, vmin, vrel))

    def _AppendOrRaise(option, min_version):
      """Helper for DRBD options"""
      if min_version is not None and vrel >= min_version:
        args.append(option)
      else:
        raise errors.BlockDeviceError("Could not use the option %s as the"
                                      " DRBD version %d.%d.%d does not support"
                                      " it." % (option, vmaj, vmin, vrel))

    # the minimum version for each feature is encoded via pairs of (minor
    # version -> x) where x is version in which support for the option was
    # introduced.
    meta_flush_supported = disk_flush_supported = {
      0: 12,
      2: 7,
      3: 0,
      }

    disk_drain_supported = {
      2: 7,
      3: 0,
      }

    disk_barriers_supported = {
      3: 0,
      }

    # meta flushes
    if disable_meta_flush:
      _AppendOrRaise(cls._DISABLE_META_FLUSH_OPTION,
                     meta_flush_supported.get(vmin, None))

    # disk flushes
    if constants.DRBD_B_DISK_FLUSH in disabled_barriers_set:
      _AppendOrRaise(cls._DISABLE_FLUSH_OPTION,
                     disk_flush_supported.get(vmin, None))

    # disk drain
    if constants.DRBD_B_DISK_DRAIN in disabled_barriers_set:
      _AppendOrRaise(cls._DISABLE_DRAIN_OPTION,
                     disk_drain_supported.get(vmin, None))

    # disk barriers
    if constants.DRBD_B_DISK_BARRIERS in disabled_barriers_set:
      _AppendOrRaise(cls._DISABLE_DISK_OPTION,
                     disk_barriers_supported.get(vmin, None))

    return args
  def _AssembleNet(self, minor, net_info, protocol,
                   dual_pri=False, hmac=None, secret=None):
    """Configure the network part of the device.

    If any element of net_info is None, the network side is instead shut
    down. Otherwise the sync parameters are applied first (see the race
    note below), the addresses are validated, drbdsetup is invoked, and
    finally we poll 'drbdsetup show' until the kernel reports the
    configured addresses.

    @type minor: int
    @param minor: the DRBD minor to configure
    @type net_info: tuple
    @param net_info: (local_host, local_port, remote_host, remote_port)
    @param protocol: the DRBD replication protocol to use
    @param dual_pri: whether to allow dual-primary mode (passes "-m")
    @param hmac: HMAC algorithm for peer authentication; only used
        together with secret
    @param secret: shared secret for peer authentication

    """
    lhost, lport, rhost, rport = net_info
    if None in net_info:
      # we don't want network connection and actually want to make
      # sure its shutdown
      self._ShutdownNet(minor)
      return

    # Workaround for a race condition. When DRBD is doing its dance to
    # establish a connection with its peer, it also sends the
    # synchronization speed over the wire. In some cases setting the
    # sync speed only after setting up both sides can race with DRBD
    # connecting, hence we set it here before telling DRBD anything
    # about its peer.
    sync_errors = self._SetMinorSyncParams(minor, self.params)
    if sync_errors:
      _ThrowError("drbd%d: can't set the synchronization parameters: %s" %
                  (minor, utils.CommaJoin(sync_errors)))

    # both endpoints must be valid addresses of the same family
    if netutils.IP6Address.IsValid(lhost):
      if not netutils.IP6Address.IsValid(rhost):
        _ThrowError("drbd%d: can't connect ip %s to ip %s" %
                    (minor, lhost, rhost))
      family = "ipv6"
    elif netutils.IP4Address.IsValid(lhost):
      if not netutils.IP4Address.IsValid(rhost):
        _ThrowError("drbd%d: can't connect ip %s to ip %s" %
                    (minor, lhost, rhost))
      family = "ipv4"
    else:
      _ThrowError("drbd%d: Invalid ip %s" % (minor, lhost))

    args = ["drbdsetup", self._DevPath(minor), "net",
            "%s:%s:%s" % (family, lhost, lport),
            "%s:%s:%s" % (family, rhost, rport), protocol,
            "-A", "discard-zero-changes",
            "-B", "consensus",
            "--create-device",
            ]
    if dual_pri:
      args.append("-m")
    if hmac and secret:
      args.extend(["-a", hmac, "-x", secret])

    # append any operator-supplied extra net options verbatim
    if self.params[constants.LDP_NET_CUSTOM]:
      args.extend(shlex.split(self.params[constants.LDP_NET_CUSTOM]))

    result = utils.RunCmd(args)
    if result.failed:
      _ThrowError("drbd%d: can't setup network: %s - %s",
                  minor, result.fail_reason, result.output)

    def _CheckNetworkConfig():
      # retry until 'drbdsetup show' reflects exactly the addresses we set
      info = self._GetDevInfo(self._GetShowData(minor))
      if not "local_addr" in info or not "remote_addr" in info:
        raise utils.RetryAgain()

      if (info["local_addr"] != (lhost, lport) or
          info["remote_addr"] != (rhost, rport)):
        raise utils.RetryAgain()

    try:
      utils.Retry(_CheckNetworkConfig, 1.0, 10.0)
    except utils.RetryTimeout:
      _ThrowError("drbd%d: timeout while configuring network", minor)
  def AddChildren(self, devices):
    """Add a disk to the DRBD device.

    Attaches the (data, meta) pair in devices as local storage of this
    DRBD minor. The device must be attached and currently diskless.

    @type devices: list
    @param devices: exactly two child block devices: the data device
        and the metadata device, in this order

    """
    if self.minor is None:
      # FIX: error message said "dbrd8" instead of "drbd8"
      _ThrowError("drbd%d: can't attach to drbd8 during AddChildren",
                  self._aminor)
    if len(devices) != 2:
      _ThrowError("drbd%d: need two devices for AddChildren", self.minor)
    info = self._GetDevInfo(self._GetShowData(self.minor))
    if "local_dev" in info:
      _ThrowError("drbd%d: already attached to a local disk", self.minor)
    backend, meta = devices
    if backend.dev_path is None or meta.dev_path is None:
      _ThrowError("drbd%d: children not ready during AddChildren", self.minor)
    backend.Open()
    meta.Open()
    # validate and (re)initialize the metadata before attaching
    self._CheckMetaSize(meta.dev_path)
    self._InitMeta(self._FindUnusedMinor(), meta.dev_path)

    self._AssembleLocal(self.minor, backend.dev_path, meta.dev_path, self.size)
    self._children = devices
  def RemoveChildren(self, devices):
    """Detach the drbd device from local storage.

    The passed devices must match the children we currently hold;
    otherwise we refuse to detach, as we would not know which storage
    the caller is actually talking about.

    @type devices: list
    @param devices: the two child devices (data, meta) to detach; their
        dev_path values must match our recorded children

    """
    if self.minor is None:
      _ThrowError("drbd%d: can't attach to drbd8 during RemoveChildren",
                  self._aminor)
    # early return if we don't actually have backing storage
    info = self._GetDevInfo(self._GetShowData(self.minor))
    if "local_dev" not in info:
      return
    if len(self._children) != 2:
      _ThrowError("drbd%d: we don't have two children: %s", self.minor,
                  self._children)
    if self._children.count(None) == 2: # we don't actually have children :)
      logging.warning("drbd%d: requested detach while detached", self.minor)
      return
    if len(devices) != 2:
      _ThrowError("drbd%d: we need two children in RemoveChildren", self.minor)
    # each requested path must match the corresponding attached child
    for child, dev in zip(self._children, devices):
      if dev != child.dev_path:
        _ThrowError("drbd%d: mismatch in local storage (%s != %s) in"
                    " RemoveChildren", self.minor, dev, child.dev_path)

    self._ShutdownLocal(self.minor)
    self._children = []
  @classmethod
  def _SetMinorSyncParams(cls, minor, params):
    """Set the parameters of the DRBD syncer.

    This is the low-level implementation. Depending on the
    dynamic-resync disk parameter, either the c-* dynamic speed
    controller options or a static resync rate ("-r") are passed to
    "drbdsetup ... syncer".

    @type minor: int
    @param minor: the drbd minor whose settings we change
    @type params: dict
    @param params: LD level disk parameters related to the synchronization
    @rtype: list
    @return: a list of error messages

    """

    args = ["drbdsetup", cls._DevPath(minor), "syncer"]
    if params[constants.LDP_DYNAMIC_RESYNC]:
      version = cls._GetVersion(cls._GetProcData())
      vmin = version["k_minor"]
      vrel = version["k_point"]

      # By definition we are using 8.x, so just check the rest of the version
      # number; the dynamic controller needs at least DRBD 8.3.9
      if vmin != 3 or vrel < 9:
        msg = ("The current DRBD version (8.%d.%d) does not support the "
               "dynamic resync speed controller" % (vmin, vrel))
        logging.error(msg)
        return [msg]

      if params[constants.LDP_PLAN_AHEAD] == 0:
        msg = ("A value of 0 for c-plan-ahead disables the dynamic sync speed"
               " controller at DRBD level. If you want to disable it, please"
               " set the dynamic-resync disk parameter to False.")
        logging.error(msg)
        return [msg]

      # add the c-* parameters to args
      args.extend(["--c-plan-ahead", params[constants.LDP_PLAN_AHEAD],
                   "--c-fill-target", params[constants.LDP_FILL_TARGET],
                   "--c-delay-target", params[constants.LDP_DELAY_TARGET],
                   "--c-max-rate", params[constants.LDP_MAX_RATE],
                   "--c-min-rate", params[constants.LDP_MIN_RATE],
                   ])

    else:
      # static resync rate
      args.extend(["-r", "%d" % params[constants.LDP_RESYNC_RATE]])

    args.append("--create-device")
    result = utils.RunCmd(args)
    if result.failed:
      msg = ("Can't change syncer rate: %s - %s" %
             (result.fail_reason, result.output))
      logging.error(msg)
      return [msg]

    return []
  def SetSyncParams(self, params):
    """Set the synchronization parameters of the DRBD syncer.

    The parameters are applied first to the children (via the parent
    class) and then to our own minor.

    @type params: dict
    @param params: LD level disk parameters related to the synchronization
    @rtype: list
    @return: a list of error messages, emitted both by the current node and by
    children. An empty list means no errors

    """
    if self.minor is None:
      msg = "Not attached during SetSyncParams"
      logging.info(msg)
      return [msg]

    # collect child errors, then add our own minor's errors
    msgs = super(DRBD8, self).SetSyncParams(params)
    msgs += self._SetMinorSyncParams(self.minor, params)
    return msgs
  def PauseResumeSync(self, pause):
    """Pauses or resumes the sync of a DRBD device.

    The operation is applied to the children first, then to our own
    minor via drbdsetup.

    @param pause: Whether to pause or resume
    @return: the success of the operation

    """
    if self.minor is None:
      logging.info("Not attached during PauseSync")
      return False

    children_ok = super(DRBD8, self).PauseResumeSync(pause)

    cmd = "pause-sync" if pause else "resume-sync"

    result = utils.RunCmd(["drbdsetup", self.dev_path, cmd])
    if result.failed:
      logging.error("Can't %s: %s - %s", cmd,
                    result.fail_reason, result.output)
    # success only if both our minor and all children succeeded
    return not result.failed and children_ok
  def GetProcStatus(self):
    """Return device data from /proc.

    @rtype: DRBD8Status
    @return: the parsed /proc status entry for our own minor

    """
    if self.minor is None:
      _ThrowError("drbd%d: GetStats() called while not attached", self._aminor)
    minors = self._MassageProcData(self._GetProcData())
    if self.minor not in minors:
      _ThrowError("drbd%d: can't find myself in /proc", self.minor)
    return DRBD8Status(minors[self.minor])
  def GetSyncStatus(self):
    """Returns the sync status of the device.


    If sync_percent is None, it means all is ok
    If estimated_time is None, it means we can't estimate
    the time needed, otherwise it's the time left in seconds.


    We set the is_degraded parameter to True on two conditions:
    network not connected or local disk missing.

    We compute the ldisk parameter based on whether we have a local
    disk or not.

    @rtype: objects.BlockDevStatus

    """
    if self.minor is None and not self.Attach():
      _ThrowError("drbd%d: can't Attach() in GetSyncStatus", self._aminor)

    proc_status = self.GetProcStatus()
    # degraded unless both connected and the local disk is up to date
    degraded = not (proc_status.is_connected and
                    proc_status.is_disk_uptodate)

    # map the local-disk state to an LDS_* constant
    if proc_status.is_disk_uptodate:
      ldisk_status = constants.LDS_OKAY
    elif proc_status.is_diskless:
      ldisk_status = constants.LDS_FAULTY
    else:
      ldisk_status = constants.LDS_UNKNOWN

    return objects.BlockDevStatus(dev_path=self.dev_path,
                                  major=self.major,
                                  minor=self.minor,
                                  sync_percent=proc_status.sync_percent,
                                  estimated_time=proc_status.est_time,
                                  is_degraded=degraded,
                                  ldisk_status=ldisk_status)
  def Open(self, force=False):
    """Make the local state primary.

    If the 'force' parameter is given, the '-o' option is passed to
    drbdsetup. Since this is a potentially dangerous operation, the
    force flag should be only given after creation, when it actually
    is mandatory.

    """
    if self.minor is None and not self.Attach():
      logging.error("DRBD cannot attach to a device during open")
      return False
    # promote this minor to the primary role; "-o" overrides outdated data
    args = ["drbdsetup", self.dev_path, "primary"] + (["-o"] if force else [])
    result = utils.RunCmd(args)
    if result.failed:
      _ThrowError("drbd%d: can't make drbd device primary: %s", self.minor,
                  result.output)
  def Close(self):
    """Make the local state secondary.

    This will, of course, fail if the device is in use.

    """
    if self.minor is None and not self.Attach():
      _ThrowError("drbd%d: can't Attach() in Close()", self._aminor)
    # demote our minor to the secondary role
    demote = utils.RunCmd(["drbdsetup", self.dev_path, "secondary"])
    if demote.failed:
      _ThrowError("drbd%d: can't switch drbd device to secondary: %s",
                  self.minor, demote.output)
  def DisconnectNet(self):
    """Removes network configuration.

    This method shutdowns the network side of the device.

    The method will wait up to a hardcoded timeout for the device to
    go into standalone after the 'disconnect' command before
    re-configuring it, as sometimes it takes a while for the
    disconnect to actually propagate and thus we might issue a 'net'
    command while the device is still connected. If the device will
    still be attached to the network and we time out, we raise an
    exception.

    @raise errors.BlockDeviceError: if not attached, if network info is
        missing, or if the device does not reach standalone in time

    """
    if self.minor is None:
      _ThrowError("drbd%d: disk not attached in re-attach net", self._aminor)

    if None in (self._lhost, self._lport, self._rhost, self._rport):
      _ThrowError("drbd%d: DRBD disk missing network info in"
                  " DisconnectNet()", self.minor)

    # mutable holder so the retried closure below can record whether any
    # disconnect attempt ever succeeded
    class _DisconnectStatus:
      def __init__(self, ever_disconnected):
        self.ever_disconnected = ever_disconnected

    dstatus = _DisconnectStatus(_IgnoreError(self._ShutdownNet, self.minor))

    def _WaitForDisconnect():
      if self.GetProcStatus().is_standalone:
        return

      # retry the disconnect, it seems possible that due to a well-time
      # disconnect on the peer, my disconnect command might be ignored and
      # forgotten
      dstatus.ever_disconnected = \
        _IgnoreError(self._ShutdownNet, self.minor) or dstatus.ever_disconnected

      raise utils.RetryAgain()

    # Keep start time
    start_time = time.time()

    try:
      # Start delay at 100 milliseconds and grow up to 2 seconds
      utils.Retry(_WaitForDisconnect, (0.1, 1.5, 2.0),
                  self._NET_RECONFIG_TIMEOUT)
    except utils.RetryTimeout:
      # choose the error message based on whether any disconnect ever worked
      if dstatus.ever_disconnected:
        msg = ("drbd%d: device did not react to the"
               " 'disconnect' command in a timely manner")
      else:
        msg = "drbd%d: can't shutdown network, even after multiple retries"

      _ThrowError(msg, self.minor)

    # log slow disconnects (more than a quarter of the allowed timeout)
    reconfig_time = time.time() - start_time
    if reconfig_time > (self._NET_RECONFIG_TIMEOUT * 0.25):
      logging.info("drbd%d: DisconnectNet: detach took %.3f seconds",
                   self.minor, reconfig_time)
  def AttachNet(self, multimaster):
    """Reconnects the network.

    This method connects the network side of the device with a
    specified multi-master flag. The device needs to be 'Standalone'
    but have valid network configuration data.

    @param multimaster: init the network in dual-primary mode

    """
    if self.minor is None:
      _ThrowError("drbd%d: device not attached in AttachNet", self._aminor)

    net_info = (self._lhost, self._lport, self._rhost, self._rport)
    if None in net_info:
      _ThrowError("drbd%d: missing network info in AttachNet()", self.minor)

    # the device must be fully disconnected before reconfiguring the net
    if not self.GetProcStatus().is_standalone:
      _ThrowError("drbd%d: device is not standalone in AttachNet", self.minor)

    self._AssembleNet(self.minor, net_info,
                      constants.DRBD_NET_PROTOCOL, dual_pri=multimaster,
                      hmac=constants.DRBD_HMAC_ALG, secret=self._secret)
  def Attach(self):
    """Check if our minor is configured.

    This doesn't do any device configurations - it only checks if the
    minor is in a state different from Unconfigured.

    Note that this function will not change the state of the system in
    any way (except in case of side-effects caused by reading from
    /proc).

    @rtype: boolean
    @return: True if our minor is currently in use

    """
    # our minor is attached iff it shows up among the used devices
    minor = self._aminor if self._aminor in self.GetUsedDevs() else None
    self._SetFromMinor(minor)
    return minor is not None
  def Assemble(self):
    """Assemble the drbd.

    Method:
      - if we have a configured device, we try to ensure that it matches
        our config
      - if not, we create it from zero
      - anyway, set the device parameters

    """
    super(DRBD8, self).Assemble()

    self.Attach()
    if self.minor is not None:
      # a device already exists for our minor: recheck the local and
      # network status and try to fix whatever disagrees
      self._SlowAssemble()
    else:
      # local device completely unconfigured; build it from scratch
      self._FastAssemble()

    sync_problems = self.SetSyncParams(self.params)
    if sync_problems:
      _ThrowError("drbd%d: can't set the synchronization parameters: %s" %
                  (self.minor, utils.CommaJoin(sync_problems)))
  def _SlowAssemble(self):
    """Assembles the DRBD device from a (partially) configured device.

    In case of partially attached (local device matches but no network
    setup), we perform the network attach. If successful, we re-test
    the attach if can return success.

    The single-iteration for loop exists only so that 'break' can mark
    success; if no branch breaks, the for/else clause resets minor to
    None and we raise.

    """
    # TODO: Rewrite to not use a for loop just because there is 'break'
    # pylint: disable=W0631
    net_data = (self._lhost, self._lport, self._rhost, self._rport)
    for minor in (self._aminor,):
      info = self._GetDevInfo(self._GetShowData(minor))
      match_l = self._MatchesLocal(info)
      match_r = self._MatchesNet(info)

      if match_l and match_r:
        # everything matches
        break

      if match_l and not match_r and "local_addr" not in info:
        # disk matches, but not attached to network, attach and recheck
        self._AssembleNet(minor, net_data, constants.DRBD_NET_PROTOCOL,
                          hmac=constants.DRBD_HMAC_ALG, secret=self._secret)
        if self._MatchesNet(self._GetDevInfo(self._GetShowData(minor))):
          break
        else:
          _ThrowError("drbd%d: network attach successful, but 'drbdsetup"
                      " show' disagrees", minor)

      if match_r and "local_dev" not in info:
        # no local disk, but network attached and it matches
        self._AssembleLocal(minor, self._children[0].dev_path,
                            self._children[1].dev_path, self.size)
        if self._MatchesNet(self._GetDevInfo(self._GetShowData(minor))):
          break
        else:
          _ThrowError("drbd%d: disk attach successful, but 'drbdsetup"
                      " show' disagrees", minor)

      # this case must be considered only if we actually have local
      # storage, i.e. not in diskless mode, because all diskless
      # devices are equal from the point of view of local
      # configuration
      if (match_l and "local_dev" in info and
          not match_r and "local_addr" in info):
        # strange case - the device network part points to somewhere
        # else, even though its local storage is ours; as we own the
        # drbd space, we try to disconnect from the remote peer and
        # reconnect to our correct one
        try:
          self._ShutdownNet(minor)
        except errors.BlockDeviceError, err:
          _ThrowError("drbd%d: device has correct local storage, wrong"
                      " remote peer and is unable to disconnect in order"
                      " to attach to the correct peer: %s", minor, str(err))
        # note: _AssembleNet also handles the case when we don't want
        # local storage (i.e. one or more of the _[lr](host|port) is
        # None)
        self._AssembleNet(minor, net_data, constants.DRBD_NET_PROTOCOL,
                          hmac=constants.DRBD_HMAC_ALG, secret=self._secret)
        if self._MatchesNet(self._GetDevInfo(self._GetShowData(minor))):
          break
        else:
          _ThrowError("drbd%d: network attach successful, but 'drbdsetup"
                      " show' disagrees", minor)

    else:
      # loop finished without break: no branch could fix the device
      minor = None

    self._SetFromMinor(minor)
    if minor is None:
      _ThrowError("drbd%d: cannot activate, unknown or unhandled reason",
                  self._aminor)
  def _FastAssemble(self):
    """Assemble the drbd device from zero.

    This is run when in Assemble we detect our minor is unused.

    """
    minor = self._aminor
    # attach local storage first, if both children are available
    if self._children and self._children[0] and self._children[1]:
      self._AssembleLocal(minor, self._children[0].dev_path,
                          self._children[1].dev_path, self.size)
    # then bring up the network side, if we have full endpoint info
    if all([self._lhost, self._lport, self._rhost, self._rport]):
      self._AssembleNet(minor,
                        (self._lhost, self._lport, self._rhost, self._rport),
                        constants.DRBD_NET_PROTOCOL,
                        hmac=constants.DRBD_HMAC_ALG, secret=self._secret)
    self._SetFromMinor(minor)
  @classmethod
  def _ShutdownLocal(cls, minor):
    """Detach from the local device.

    I/Os will continue to be served from the remote device. If we
    don't have a remote device, this operation will fail.

    @type minor: int
    @param minor: the DRBD minor to detach

    """
    detach = utils.RunCmd(["drbdsetup", cls._DevPath(minor), "detach"])
    if detach.failed:
      _ThrowError("drbd%d: can't detach local disk: %s", minor, detach.output)
  @classmethod
  def _ShutdownNet(cls, minor):
    """Disconnect from the remote peer.

    This fails if we don't have a local device.

    @type minor: int
    @param minor: the DRBD minor to disconnect

    """
    disconnect = utils.RunCmd(["drbdsetup", cls._DevPath(minor), "disconnect"])
    if disconnect.failed:
      _ThrowError("drbd%d: can't shutdown network: %s", minor,
                  disconnect.output)
  @classmethod
  def _ShutdownAll(cls, minor):
    """Deactivate the device.

    This will, of course, fail if the device is in use.

    @type minor: int
    @param minor: the DRBD minor to take down completely

    """
    down = utils.RunCmd(["drbdsetup", cls._DevPath(minor), "down"])
    if down.failed:
      _ThrowError("drbd%d: can't shutdown drbd device: %s",
                  minor, down.output)
  def Shutdown(self):
    """Shutdown the DRBD device.

    Clears our attachment state first, then takes the minor down.

    """
    if self.minor is None and not self.Attach():
      logging.info("drbd%d: not attached during Shutdown()", self._aminor)
      return
    # remember the minor, then clear our state before the actual shutdown
    old_minor = self.minor
    self.minor = None
    self.dev_path = None
    self._ShutdownAll(old_minor)
  def Remove(self):
    """Stub remove for DRBD devices.

    DRBD devices are not removed per se; shutting the device down is
    all that is needed.

    """
    self.Shutdown()
  @classmethod
  def Create(cls, unique_id, children, size, params, excl_stor):
    """Create a new DRBD8 device.

    Since DRBD devices are not created per se, just assembled, this
    function only initializes the metadata.

    @param unique_id: DRBD configuration tuple; index 4 holds the minor
        to use
    @param children: the two child devices (data, meta)
    @param size: device size in MiB
    @param params: LD level disk parameters
    @param excl_stor: must be False; DRBD does not support
        exclusive storage
    @raise errors.ProgrammerError: wrong number of children or
        exclusive storage requested

    """
    if len(children) != 2:
      raise errors.ProgrammerError("Invalid setup for the drbd device")
    if excl_stor:
      raise errors.ProgrammerError("DRBD device requested with"
                                   " exclusive_storage")
    # check that the minor is unused
    aminor = unique_id[4]
    proc_info = cls._MassageProcData(cls._GetProcData())
    if aminor in proc_info:
      status = DRBD8Status(proc_info[aminor])
      in_use = status.is_in_use
    else:
      in_use = False
    if in_use:
      _ThrowError("drbd%d: minor is already in use at Create() time", aminor)
    # initialize the metadata on the meta child (children[1])
    meta = children[1]
    meta.Assemble()
    if not meta.Attach():
      _ThrowError("drbd%d: can't attach to meta device '%s'",
                  aminor, meta)
    cls._CheckMetaSize(meta.dev_path)
    cls._InitMeta(aminor, meta.dev_path)
    return cls(unique_id, children, size, params)
  def Grow(self, amount, dryrun, backingstore):
    """Resize the DRBD device and its backing storage.

    @param amount: the amount (in MiB) to grow with
    @param dryrun: whether to only simulate the growth
    @param backingstore: whether to grow only the backing storage

    """
    if self.minor is None:
      _ThrowError("drbd%d: Grow called while not attached", self._aminor)
    if len(self._children) != 2 or None in self._children:
      _ThrowError("drbd%d: cannot grow diskless device", self.minor)

    # grow the data child first; it carries the actual storage
    self._children[0].Grow(amount, dryrun, backingstore)

    if dryrun or backingstore:
      # DRBD does not support dry-run mode and is not backing storage,
      # so we'll return here
      return

    new_size_mib = self.size + amount
    result = utils.RunCmd(["drbdsetup", self.dev_path, "resize", "-s",
                           "%dm" % new_size_mib])
    if result.failed:
      _ThrowError("drbd%d: resize failed: %s", self.minor, result.output)
class FileStorage(BlockDev):
2350
  """File device.
2351

2352
  This class represents the a file storage backend device.
2353

2354
  The unique_id for the file device is a (file_driver, file_path) tuple.
2355

2356
  """
2357
  def __init__(self, unique_id, children, size, params):
2358
    """Initalizes a file device backend.
2359

2360
    """
2361
    if children:
2362
      raise errors.BlockDeviceError("Invalid setup for file device")
2363
    super(FileStorage, self).__init__(unique_id, children, size, params)
2364
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
2365
      raise ValueError("Invalid configuration data %s" % str(unique_id))
2366
    self.driver = unique_id[0]
2367
    self.dev_path = unique_id[1]
2368

    
2369
    CheckFileStoragePath(self.dev_path)
2370

    
2371
    self.Attach()
2372

    
2373
  def Assemble(self):
2374
    """Assemble the device.
2375

2376
    Checks whether the file device exists, raises BlockDeviceError otherwise.
2377

2378
    """
2379
    if not os.path.exists(self.dev_path):
2380
      _ThrowError("File device '%s' does not exist" % self.dev_path)
2381

    
2382
  def Shutdown(self):
2383
    """Shutdown the device.
2384

2385
    This is a no-op for the file type, as we don't deactivate
2386
    the file on shutdown.
2387

2388
    """
2389
    pass
2390

    
2391
  def Open(self, force=False):
2392
    """Make the device ready for I/O.
2393

2394
    This is a no-op for the file type.
2395

2396
    """
2397
    pass
2398

    
2399
  def Close(self):
2400
    """Notifies that the device will no longer be used for I/O.
2401

2402
    This is a no-op for the file type.
2403

2404
    """
2405
    pass
2406

    
2407
  def Remove(self):
2408
    """Remove the file backing the block device.
2409

2410
    @rtype: boolean
2411
    @return: True if the removal was successful
2412

2413
    """
2414
    try:
2415
      os.remove(self.dev_path)
2416
    except OSError, err:
2417
      if err.errno != errno.ENOENT:
2418
        _ThrowError("Can't remove file '%s': %s", self.dev_path, err)
2419

    
2420
  def Rename(self, new_id):
2421
    """Renames the file.
2422

2423
    """
2424
    # TODO: implement rename for file-based storage
2425
    _ThrowError("Rename is not supported for file-based storage")
2426

    
2427
  def Grow(self, amount, dryrun, backingstore):
2428
    """Grow the file
2429

2430
    @param amount: the amount (in mebibytes) to grow with
2431

2432
    """
2433
    if not backingstore:
2434
      return
2435
    # Check that the file exists
2436
    self.Assemble()
2437
    current_size = self.GetActualSize()
2438
    new_size = current_size + amount * 1024 * 1024
2439
    assert new_size > current_size, "Cannot Grow with a negative amount"
2440
    # We can't really simulate the growth
2441
    if dryrun:
2442
      return
2443
    try:
2444
      f = open(self.dev_path, "a+")
2445
      f.truncate(new_size)
2446
      f.close()
2447
    except EnvironmentError, err:
2448
      _ThrowError("Error in file growth: %", str(err))
2449

    
2450
  def Attach(self):
2451
    """Attach to an existing file.
2452

2453
    Check if this file already exists.
2454

2455
    @rtype: boolean
2456
    @return: True if file exists
2457

2458
    """
2459
    self.attached = os.path.exists(self.dev_path)
2460
    return self.attached
2461

    
2462
  def GetActualSize(self):
2463
    """Return the actual disk size.
2464

2465
    @note: the device needs to be active when this is called
2466

2467
    """
2468
    assert self.attached, "BlockDevice not attached in GetActualSize()"
2469
    try:
2470
      st = os.stat(self.dev_path)
2471
      return st.st_size
2472
    except OSError, err:
2473
      _ThrowError("Can't stat %s: %s", self.dev_path, err)
2474

    
2475
  @classmethod
2476
  def Create(cls, unique_id, children, size, params, excl_stor):
2477
    """Create a new file.
2478

2479
    @param size: the size of file in MiB
2480

2481
    @rtype: L{bdev.FileStorage}
2482
    @return: an instance of FileStorage
2483

2484
    """
2485
    if excl_stor:
2486
      raise errors.ProgrammerError("FileStorage device requested with"
2487
                                   " exclusive_storage")
2488
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
2489
      raise ValueError("Invalid configuration data %s" % str(unique_id))
2490

    
2491
    dev_path = unique_id[1]
2492

    
2493
    CheckFileStoragePath(dev_path)
2494

    
2495
    try:
2496
      fd = os.open(dev_path, os.O_RDWR | os.O_CREAT | os.O_EXCL)
2497
      f = os.fdopen(fd, "w")
2498
      f.truncate(size * 1024 * 1024)
2499
      f.close()
2500
    except EnvironmentError, err:
2501
      if err.errno == errno.EEXIST:
2502
        _ThrowError("File already existing: %s", dev_path)
2503
      _ThrowError("Error in file creation: %", str(err))
2504

    
2505
    return FileStorage(unique_id, children, size, params)
2506

    
2507

    
2508
class PersistentBlockDevice(BlockDev):
  """A block device with persistent node

  May be either directly attached, or exposed through DM (e.g. dm-multipath).
  udev helpers are probably required to give persistent, human-friendly
  names.

  For the time being, pathnames are required to lie under /dev.

  """
  def __init__(self, unique_id, children, size, params):
    """Attaches to a static block device.

    The unique_id is a path under /dev.

    """
    super(PersistentBlockDevice, self).__init__(unique_id, children, size,
                                                params)
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
      raise ValueError("Invalid configuration data %s" % str(unique_id))
    self.dev_path = unique_id[1]
    real_path = os.path.realpath(self.dev_path)
    if not real_path.startswith("/dev/"):
      raise ValueError("Full path '%s' lies outside /dev" % real_path)
    # TODO: this is just a safety guard checking that we only deal with devices
    # we know how to handle. In the future this will be integrated with
    # external storage backends and possible values will probably be collected
    # from the cluster configuration.
    if unique_id[0] != constants.BLOCKDEV_DRIVER_MANUAL:
      raise ValueError("Got persistent block device of invalid type: %s" %
                       unique_id[0])

    self.major = self.minor = None
    self.Attach()

  @classmethod
  def Create(cls, unique_id, children, size, params, excl_stor):
    """Create a new device

    This is a noop, we only return a PersistentBlockDevice instance

    """
    if excl_stor:
      raise errors.ProgrammerError("Persistent block device requested with"
                                   " exclusive_storage")
    return PersistentBlockDevice(unique_id, children, 0, params)

  def Remove(self):
    """Remove a device

    This is a noop

    """

  def Rename(self, new_id):
    """Rename this device.

    Renaming is not possible for a statically configured device.

    """
    _ThrowError("Rename is not supported for PersistentBlockDev storage")

  def Attach(self):
    """Attach to an existing block device.

    @rtype: boolean
    @return: True if a block device exists at our configured path

    """
    self.attached = False
    try:
      st_info = os.stat(self.dev_path)
    except OSError as err:
      logging.error("Error stat()'ing %s: %s", self.dev_path, str(err))
      return False

    if not stat.S_ISBLK(st_info.st_mode):
      logging.error("%s is not a block device", self.dev_path)
      return False

    self.major = os.major(st_info.st_rdev)
    self.minor = os.minor(st_info.st_rdev)
    self.attached = True

    return True

  def Assemble(self):
    """Assemble the device.

    This is a noop for a static device.

    """

  def Shutdown(self):
    """Shutdown the device.

    This is a noop for a static device.

    """

  def Open(self, force=False):
    """Make the device ready for I/O.

    This is a noop for a static device.

    """

  def Close(self):
    """Notifies that the device will no longer be used for I/O.

    This is a noop for a static device.

    """

  def Grow(self, amount, dryrun, backingstore):
    """Grow the logical volume.

    Growing is not possible for a statically configured device.

    """
    _ThrowError("Grow is not supported for PersistentBlockDev storage")


class RADOSBlockDevice(BlockDev):
  """A RADOS Block Device (rbd).

  This class implements the RADOS Block Device for the backend. You need
  the rbd kernel driver, the RADOS Tools and a working RADOS cluster for
  this to be functional.

  """
  def __init__(self, unique_id, children, size, params):
    """Attaches to an rbd device.

    """
    super(RADOSBlockDevice, self).__init__(unique_id, children, size, params)
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
      raise ValueError("Invalid configuration data %s" % str(unique_id))

    self.driver, self.rbd_name = unique_id

    self.major = self.minor = None
    self.Attach()

  @classmethod
  def Create(cls, unique_id, children, size, params, excl_stor):
    """Create a new rbd device.

    Provision a new rbd volume inside a RADOS pool.

    """
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
      raise errors.ProgrammerError("Invalid configuration data %s" %
                                   str(unique_id))
    if excl_stor:
      raise errors.ProgrammerError("RBD device requested with"
                                   " exclusive_storage")
    rbd_pool = params[constants.LDP_POOL]
    rbd_name = unique_id[1]

    # Provision a new rbd volume (Image) inside the RADOS cluster.
    cmd = [constants.RBD_CMD, "create", "-p", rbd_pool,
           rbd_name, "--size", "%s" % size]
    result = utils.RunCmd(cmd)
    if result.failed:
      _ThrowError("rbd creation failed (%s): %s",
                  result.fail_reason, result.output)

    return RADOSBlockDevice(unique_id, children, size, params)

  def Remove(self):
    """Remove the rbd device.

    """
    rbd_pool = self.params[constants.LDP_POOL]
    rbd_name = self.unique_id[1]

    if not self.minor and not self.Attach():
      # The rbd device doesn't exist.
      return

    # First shutdown the device (remove mappings).
    self.Shutdown()

    # Remove the actual Volume (Image) from the RADOS cluster.
    cmd = [constants.RBD_CMD, "rm", "-p", rbd_pool, rbd_name]
    result = utils.RunCmd(cmd)
    if result.failed:
      _ThrowError("Can't remove Volume from cluster with rbd rm: %s - %s",
                  result.fail_reason, result.output)

  def Rename(self, new_id):
    """Rename this device.

    """
    pass

  def Attach(self):
    """Attach to an existing rbd device.

    This method maps the rbd volume that matches our name with
    an rbd device and then attaches to this device.

    """
    self.attached = False

    # Map the rbd volume to a block device under /dev
    self.dev_path = self._MapVolumeToBlockdev(self.unique_id)

    try:
      st = os.stat(self.dev_path)
    except OSError as err:
      logging.error("Error stat()'ing %s: %s", self.dev_path, str(err))
      return False

    if not stat.S_ISBLK(st.st_mode):
      logging.error("%s is not a block device", self.dev_path)
      return False

    self.major = os.major(st.st_rdev)
    self.minor = os.minor(st.st_rdev)
    self.attached = True

    return True

  def _MapVolumeToBlockdev(self, unique_id):
    """Maps existing rbd volumes to block devices.

    This method should be idempotent if the mapping already exists.

    @rtype: string
    @return: the block device path that corresponds to the volume

    """
    pool = self.params[constants.LDP_POOL]
    name = unique_id[1]

    # Check if the mapping already exists.
    rbd_dev = self._VolumeToBlockdev(pool, name)
    if rbd_dev:
      # The mapping exists. Return it.
      return rbd_dev

    # The mapping doesn't exist. Create it.
    map_cmd = [constants.RBD_CMD, "map", "-p", pool, name]
    result = utils.RunCmd(map_cmd)
    if result.failed:
      _ThrowError("rbd map failed (%s): %s",
                  result.fail_reason, result.output)

    # Find the corresponding rbd device.
    rbd_dev = self._VolumeToBlockdev(pool, name)
    if not rbd_dev:
      _ThrowError("rbd map succeeded, but could not find the rbd block"
                  " device in output of showmapped, for volume: %s", name)

    # The device was successfully mapped. Return it.
    return rbd_dev

  @classmethod
  def _VolumeToBlockdev(cls, pool, volume_name):
    """Do the 'volume name'-to-'rbd block device' resolving.

    @type pool: string
    @param pool: RADOS pool to use
    @type volume_name: string
    @param volume_name: the name of the volume whose device we search for
    @rtype: string or None
    @return: block device path if the volume is mapped, else None

    """
    try:
      # Newer versions of the rbd tool support json output formatting. Use it
      # if available.
      showmap_cmd = [
        constants.RBD_CMD,
        "showmapped",
        "-p",
        pool,
        "--format",
        "json"
        ]
      result = utils.RunCmd(showmap_cmd)
      if result.failed:
        logging.error("rbd JSON output formatting returned error (%s): %s,"
                      "falling back to plain output parsing",
                      result.fail_reason, result.output)
        raise RbdShowmappedJsonError

      return cls._ParseRbdShowmappedJson(result.output, volume_name)
    except RbdShowmappedJsonError:
      # For older versions of rbd, we have to parse the plain / text output
      # manually.
      showmap_cmd = [constants.RBD_CMD, "showmapped", "-p", pool]
      result = utils.RunCmd(showmap_cmd)
      if result.failed:
        _ThrowError("rbd showmapped failed (%s): %s",
                    result.fail_reason, result.output)

      return cls._ParseRbdShowmappedPlain(result.output, volume_name)

  @staticmethod
  def _ParseRbdShowmappedJson(output, volume_name):
    """Parse the json output of `rbd showmapped'.

    This method parses the json output of `rbd showmapped' and returns the rbd
    block device path (e.g. /dev/rbd0) that matches the given rbd volume.

    @type output: string
    @param output: the json output of `rbd showmapped'
    @type volume_name: string
    @param volume_name: the name of the volume whose device we search for
    @rtype: string or None
    @return: block device path if the volume is mapped, else None

    """
    try:
      devices = serializer.LoadJson(output)
    except ValueError as err:
      _ThrowError("Unable to parse JSON data: %s" % err)

    rbd_dev = None
    for d in devices.values(): # pylint: disable=E1103
      try:
        name = d["name"]
      except KeyError:
        _ThrowError("'name' key missing from json object %s", devices)

      if name == volume_name:
        if rbd_dev is not None:
          _ThrowError("rbd volume %s is mapped more than once", volume_name)

        rbd_dev = d["device"]

    return rbd_dev

  @staticmethod
  def _ParseRbdShowmappedPlain(output, volume_name):
    """Parse the (plain / text) output of `rbd showmapped'.

    This method parses the output of `rbd showmapped' and returns
    the rbd block device path (e.g. /dev/rbd0) that matches the
    given rbd volume.

    @type output: string
    @param output: the plain text output of `rbd showmapped'
    @type volume_name: string
    @param volume_name: the name of the volume whose device we search for
    @rtype: string or None
    @return: block device path if the volume is mapped, else None

    """
    allfields = 5
    volumefield = 2
    devicefield = 4

    lines = output.splitlines()

    # Try parsing the new output format (ceph >= 0.55).
    splitted_lines = [line.split() for line in lines]

    # Check for empty output.
    if not splitted_lines:
      return None

    # Check showmapped output, to determine number of fields.
    field_cnt = len(splitted_lines[0])
    if field_cnt != allfields:
      # Parsing the new format failed. Fallback to parsing the old output
      # format (< 0.55).
      splitted_lines = [line.split("\t") for line in lines]
      # Recompute the field count after re-splitting; without this the
      # fallback path always failed, even when the tab-separated (old)
      # format parsed correctly.
      field_cnt = len(splitted_lines[0])
      if field_cnt != allfields:
        _ThrowError("Cannot parse rbd showmapped output expected %s fields,"
                    " found %s", allfields, field_cnt)

    matched_lines = [fields for fields in splitted_lines
                     if len(fields) == allfields and
                     fields[volumefield] == volume_name]

    if len(matched_lines) > 1:
      _ThrowError("rbd volume %s mapped more than once", volume_name)

    if matched_lines:
      # rbd block device found. Return it.
      return matched_lines[0][devicefield]

    # The given volume is not mapped.
    return None

  def Assemble(self):
    """Assemble the device.

    """
    pass

  def Shutdown(self):
    """Shutdown the device.

    """
    if not self.minor and not self.Attach():
      # The rbd device doesn't exist.
      return

    # Unmap the block device from the Volume.
    self._UnmapVolumeFromBlockdev(self.unique_id)

    self.minor = None
    self.dev_path = None

  def _UnmapVolumeFromBlockdev(self, unique_id):
    """Unmaps the rbd device from the Volume it is mapped.

    Unmaps the rbd device from the Volume it was previously mapped to.
    This method should be idempotent if the Volume isn't mapped.

    """
    pool = self.params[constants.LDP_POOL]
    name = unique_id[1]

    # Check if the mapping already exists.
    rbd_dev = self._VolumeToBlockdev(pool, name)

    if rbd_dev:
      # The mapping exists. Unmap the rbd device.
      unmap_cmd = [constants.RBD_CMD, "unmap", "%s" % rbd_dev]
      result = utils.RunCmd(unmap_cmd)
      if result.failed:
        _ThrowError("rbd unmap failed (%s): %s",
                    result.fail_reason, result.output)

  def Open(self, force=False):
    """Make the device ready for I/O.

    """
    pass

  def Close(self):
    """Notifies that the device will no longer be used for I/O.

    """
    pass

  def Grow(self, amount, dryrun, backingstore):
    """Grow the Volume.

    @type amount: integer
    @param amount: the amount (in mebibytes) to grow with
    @type dryrun: boolean
    @param dryrun: whether to execute the operation in simulation mode
        only, without actually increasing the size

    """
    if not backingstore:
      return
    if not self.Attach():
      _ThrowError("Can't attach to rbd device during Grow()")

    if dryrun:
      # the rbd tool does not support dry runs of resize operations.
      # Since rbd volumes are thinly provisioned, we assume
      # there is always enough free space for the operation.
      return

    rbd_pool = self.params[constants.LDP_POOL]
    rbd_name = self.unique_id[1]
    new_size = self.size + amount

    # Resize the rbd volume (Image) inside the RADOS cluster.
    cmd = [constants.RBD_CMD, "resize", "-p", rbd_pool,
           rbd_name, "--size", "%s" % new_size]
    result = utils.RunCmd(cmd)
    if result.failed:
      _ThrowError("rbd resize failed (%s): %s",
                  result.fail_reason, result.output)


class ExtStorageDevice(BlockDev):
  """A block device provided by an ExtStorage Provider.

  This class implements the External Storage Interface, which means
  handling of the externally provided block devices.

  """
  def __init__(self, unique_id, children, size, params):
    """Attaches to an extstorage block device.

    """
    super(ExtStorageDevice, self).__init__(unique_id, children, size, params)
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
      raise ValueError("Invalid configuration data %s" % str(unique_id))

    self.driver, self.vol_name = unique_id
    self.ext_params = params

    self.major = self.minor = None
    self.Attach()

  @classmethod
  def Create(cls, unique_id, children, size, params, excl_stor):
    """Create a new extstorage device.

    Provision a new volume using an extstorage provider, which will
    then be mapped to a block device.

    """
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
      raise errors.ProgrammerError("Invalid configuration data %s" %
                                   str(unique_id))
    if excl_stor:
      raise errors.ProgrammerError("extstorage device requested with"
                                   " exclusive_storage")

    # Let the provider's create script provision a new Volume inside
    # the External Storage
    _ExtStorageAction(constants.ES_ACTION_CREATE, unique_id,
                      params, str(size))

    return ExtStorageDevice(unique_id, children, size, params)

  def Remove(self):
    """Remove the extstorage device.

    """
    if not self.minor and not self.Attach():
      # The extstorage device doesn't exist.
      return

    # Drop any existing mappings before removing the volume itself.
    self.Shutdown()

    # Let the provider's remove script delete the Volume from the
    # External Storage
    _ExtStorageAction(constants.ES_ACTION_REMOVE, self.unique_id,
                      self.ext_params)

  def Rename(self, new_id):
    """Rename this device.

    """
    pass

  def Attach(self):
    """Attach to an existing extstorage device.

    This method maps the extstorage volume that matches our name with
    a corresponding block device and then attaches to this device.

    @rtype: boolean
    @return: True if the volume could be mapped to a block device

    """
    self.attached = False

    # The provider's attach script maps an existing Volume to a block
    # device under /dev and prints its path
    self.dev_path = _ExtStorageAction(constants.ES_ACTION_ATTACH,
                                      self.unique_id, self.ext_params)

    try:
      st_info = os.stat(self.dev_path)
    except OSError as err:
      logging.error("Error stat()'ing %s: %s", self.dev_path, str(err))
      return False

    if not stat.S_ISBLK(st_info.st_mode):
      logging.error("%s is not a block device", self.dev_path)
      return False

    self.major = os.major(st_info.st_rdev)
    self.minor = os.minor(st_info.st_rdev)
    self.attached = True

    return True

  def Assemble(self):
    """Assemble the device.

    """
    pass

  def Shutdown(self):
    """Shutdown the device.

    """
    if not self.minor and not self.Attach():
      # The extstorage device doesn't exist.
      return

    # The provider's detach script unmaps an existing Volume from its
    # block device under /dev
    _ExtStorageAction(constants.ES_ACTION_DETACH, self.unique_id,
                      self.ext_params)

    self.minor = None
    self.dev_path = None

  def Open(self, force=False):
    """Make the device ready for I/O.

    """
    pass

  def Close(self):
    """Notifies that the device will no longer be used for I/O.

    """
    pass

  def Grow(self, amount, dryrun, backingstore):
    """Grow the Volume.

    @type amount: integer
    @param amount: the amount (in mebibytes) to grow with
    @type dryrun: boolean
    @param dryrun: whether to execute the operation in simulation mode
        only, without actually increasing the size

    """
    if not backingstore:
      return
    if not self.Attach():
      _ThrowError("Can't attach to extstorage device during Grow()")

    if dryrun:
      # we do not support dry runs of resize operations for now.
      return

    new_size = self.size + amount

    # The provider's grow script resizes an existing Volume inside the
    # External Storage
    _ExtStorageAction(constants.ES_ACTION_GROW, self.unique_id,
                      self.ext_params, str(self.size), grow=str(new_size))

  def SetInfo(self, text):
    """Update metadata with info text.

    """
    # Replace invalid characters
    text = re.sub("^[^A-Za-z0-9_+.]", "_", text)
    text = re.sub("[^-A-Za-z0-9_+.]", "_", text)

    # Only up to 128 characters are allowed
    text = text[:128]

    # The provider's setinfo script stores the metadata for an existing
    # Volume inside the External Storage
    _ExtStorageAction(constants.ES_ACTION_SETINFO, self.unique_id,
                      self.ext_params, metadata=text)


def _ExtStorageAction(action, unique_id, ext_params,
                      size=None, grow=None, metadata=None):
  """Take an External Storage action.

  Take an External Storage action concerning or affecting
  a specific Volume inside the External Storage.

  @type action: string
  @param action: which action to perform. One of:
                 create / remove / grow / attach / detach
  @type unique_id: tuple (driver, vol_name)
  @param unique_id: a tuple containing the type of ExtStorage (driver)
                    and the Volume name
  @type ext_params: dict
  @param ext_params: ExtStorage parameters
  @type size: integer
  @param size: the size of the Volume in mebibytes
  @type grow: integer
  @param grow: the new size in mebibytes (after grow)
  @type metadata: string
  @param metadata: metadata info of the Volume, for use by the provider
  @rtype: None or a block device path (during attach)

  """
  driver, vol_name = unique_id

  # Create an External Storage instance of type `driver'
  status, inst_es = ExtStorageFromDisk(driver)
  if not status:
    _ThrowError("%s" % inst_es)

  # Create the basic environment for the driver's scripts
  create_env = _ExtStorageEnvironment(unique_id, ext_params, size,
                                      grow, metadata)

  # Do not use log file for action `attach' as we need
  # to get the output from RunResult
  # TODO: find a way to have a log file for attach too
  logfile = None
  # Compare by value: `is not' on strings only worked by accident of
  # interning and is not a reliable equality test
  if action != constants.ES_ACTION_ATTACH:
    logfile = _VolumeLogName(action, driver, vol_name)

  # Make sure the given action results in a valid script
  if action not in constants.ES_SCRIPTS:
    _ThrowError("Action '%s' doesn't result in a valid ExtStorage script" %
                action)

  # Find out which external script to run according the given action
  script_name = action + "_script"
  script = getattr(inst_es, script_name)

  # Run the external script
  result = utils.RunCmd([script], env=create_env,
                        cwd=inst_es.path, output=logfile,)
  if result.failed:
    logging.error("External storage's %s command '%s' returned"
                  " error: %s, logfile: %s, output: %s",
                  action, result.cmd, result.fail_reason,
                  logfile, result.output)

    # If logfile is 'None' (during attach), it breaks TailFile
    # TODO: have a log file for attach too
    if action != constants.ES_ACTION_ATTACH:
      lines = [utils.SafeEncode(val)
               for val in utils.TailFile(logfile, lines=20)]
    else:
      lines = result.output[-20:]

    _ThrowError("External storage's %s script failed (%s), last"
                " lines of output:\n%s",
                action, result.fail_reason, "\n".join(lines))

  if action == constants.ES_ACTION_ATTACH:
    return result.stdout


def ExtStorageFromDisk(name, base_dir=None):
  """Create an ExtStorage instance from disk.

  This function will return an ExtStorage instance
  if the given name is a valid ExtStorage name.

  @type base_dir: string
  @keyword base_dir: Base directory containing ExtStorage installations.
                     Defaults to a search in all the ES_SEARCH_PATH dirs.
  @rtype: tuple
  @return: True and the ExtStorage instance if we find a valid one, or
      False and the diagnose message on error

  """
  if base_dir is None:
    search_dirs = pathutils.ES_SEARCH_PATH
  else:
    search_dirs = [base_dir]

  es_dir = utils.FindFile(name, search_dirs, os.path.isdir)

  if es_dir is None:
    return False, ("Directory for External Storage Provider %s not"
                   " found in search path" % name)

  # Map each expected file name to its absolute path name; all entries
  # are required files (value True until replaced by the path)
  es_files = dict.fromkeys(constants.ES_SCRIPTS, True)
  es_files[constants.ES_PARAMETERS_FILE] = True

  for filename in list(es_files):
    abs_path = utils.PathJoin(es_dir, filename)
    es_files[filename] = abs_path

    try:
      st = os.stat(abs_path)
    except EnvironmentError as err:
      return False, ("File '%s' under path '%s' is missing (%s)" %
                     (filename, es_dir, utils.ErrnoOrStr(err)))

    if not stat.S_ISREG(stat.S_IFMT(st.st_mode)):
      return False, ("File '%s' under path '%s' is not a regular file" %
                     (filename, es_dir))

    # Scripts must additionally carry the owner-executable bit
    if filename in constants.ES_SCRIPTS:
      if stat.S_IMODE(st.st_mode) & stat.S_IXUSR != stat.S_IXUSR:
        return False, ("File '%s' under path '%s' is not executable" %
                       (filename, es_dir))

  parameters = []
  if constants.ES_PARAMETERS_FILE in es_files:
    parameters_file = es_files[constants.ES_PARAMETERS_FILE]
    try:
      parameters = utils.ReadFile(parameters_file).splitlines()
    except EnvironmentError as err:
      return False, ("Error while reading the EXT parameters file at %s: %s" %
                     (parameters_file, utils.ErrnoOrStr(err)))
    # Each line is "<name> <description>"; split once on whitespace
    parameters = [v.split(None, 1) for v in parameters]

  es_obj = \
    objects.ExtStorage(name=name, path=es_dir,
                       create_script=es_files[constants.ES_SCRIPT_CREATE],
                       remove_script=es_files[constants.ES_SCRIPT_REMOVE],
                       grow_script=es_files[constants.ES_SCRIPT_GROW],
                       attach_script=es_files[constants.ES_SCRIPT_ATTACH],
                       detach_script=es_files[constants.ES_SCRIPT_DETACH],
                       setinfo_script=es_files[constants.ES_SCRIPT_SETINFO],
                       verify_script=es_files[constants.ES_SCRIPT_VERIFY],
                       supported_parameters=parameters)
  return True, es_obj


def _ExtStorageEnvironment(unique_id, ext_params,
3298
                           size=None, grow=None, metadata=None):
3299
  """Calculate the environment for an External Storage script.
3300

3301
  @type unique_id: tuple (driver, vol_name)
3302
  @param unique_id: ExtStorage pool and name of the Volume
3303
  @type ext_params: dict
3304
  @param ext_params: the EXT parameters
3305
  @type size: string
3306
  @param size: size of the Volume (in mebibytes)
3307
  @type grow: string
3308
  @param grow: new size of Volume after grow (in mebibytes)
3309
  @type metadata: string
3310
  @param metadata: metadata info of the Volume
3311
  @rtype: dict
3312
  @return: dict of environment variables
3313

3314
  """
3315
  vol_name = unique_id[1]
3316

    
3317
  result = {}
3318
  result["VOL_NAME"] = vol_name
3319

    
3320
  # EXT params
3321
  for pname, pvalue in ext_params.items():
3322
    result["EXTP_%s" % pname.upper()] = str(pvalue)
3323

    
3324
  if size is not None:
3325
    result["VOL_SIZE"] = size
3326

    
3327
  if grow is not None:
3328
    result["VOL_NEW_SIZE"] = grow
3329

    
3330
  if metadata is not None:
3331
    result["VOL_METADATA"] = metadata
3332

    
3333
  return result
3334

    
3335

    
3336
def _VolumeLogName(kind, es_name, volume):
  """Compute the ExtStorage log filename for a given Volume and operation.

  @type kind: string
  @param kind: the operation type (e.g. create, remove etc.)
  @type es_name: string
  @param es_name: the ExtStorage name
  @type volume: string
  @param volume: the name of the Volume inside the External Storage

  """
  log_dir = pathutils.LOG_ES_DIR
  # Refuse to build a path under a non-existing log directory
  if not os.path.isdir(log_dir):
    _ThrowError("Cannot find log directory: %s", log_dir)

  # TODO: Use tempfile.mkstemp to create unique filename
  basename = ("%s-%s-%s-%s.log" %
              (kind, es_name, volume, utils.TimestampForFilename()))
  return utils.PathJoin(log_dir, basename)


# Mapping of logical-disk type constants to their implementing classes
DEV_MAP = {
  constants.LD_LV: LogicalVolume,
  constants.LD_DRBD8: DRBD8,
  constants.LD_BLOCKDEV: PersistentBlockDevice,
  constants.LD_RBD: RADOSBlockDevice,
  constants.LD_EXT: ExtStorageDevice,
  }

# File-based storage is only registered when enabled at build time
if constants.ENABLE_FILE_STORAGE or constants.ENABLE_SHARED_FILE_STORAGE:
  DEV_MAP[constants.LD_FILE] = FileStorage


def _VerifyDiskType(dev_type):
  """Ensure the given disk type has a registered implementation.

  @raise errors.ProgrammerError: if there is no DEV_MAP entry for the type

  """
  if dev_type not in DEV_MAP:
    raise errors.ProgrammerError("Invalid block device type '%s'" % dev_type)


def _VerifyDiskParams(disk):
  """Verifies if all disk parameters are set.

  @raise errors.ProgrammerError: if any expected parameter is missing

  """
  expected = set(constants.DISK_LD_DEFAULTS[disk.dev_type])
  missing = expected - set(disk.params)
  if missing:
    raise errors.ProgrammerError("Block device is missing disk parameters: %s" %
                                 missing)


def FindDevice(disk, children):
  """Search for an existing, assembled device.

  This will succeed only if the device exists and is assembled, but it
  does not do any actions in order to activate the device.

  @type disk: L{objects.Disk}
  @param disk: the disk object to find
  @type children: list of L{bdev.BlockDev}
  @param children: the list of block devices that are children of the device
                  represented by the disk parameter
  @return: the instantiated device, or C{None} when it is not attached

  """
  _VerifyDiskType(disk.dev_type)
  dev_class = DEV_MAP[disk.dev_type]
  device = dev_class(disk.physical_id, children, disk.size, disk.params)
  if device.attached:
    return device
  # Not attached: report absence instead of activating anything
  return None
3403

    
3404

    
3405
def Assemble(disk, children):
  """Try to attach or assemble an existing device.

  This will attach or assemble the device, as needed, to bring it
  fully up. It must be safe to run on already-assembled devices.

  @type disk: L{objects.Disk}
  @param disk: the disk object to assemble
  @type children: list of L{bdev.BlockDev}
  @param children: the list of block devices that are children of the device
                  represented by the disk parameter
  @return: the assembled device

  """
  # Validate both the type and the parameter set before touching anything
  _VerifyDiskType(disk.dev_type)
  _VerifyDiskParams(disk)
  dev_class = DEV_MAP[disk.dev_type]
  device = dev_class(disk.physical_id, children, disk.size, disk.params)
  device.Assemble()
  return device
3424

    
3425

    
3426
def Create(disk, children, excl_stor):
  """Create a device.

  @type disk: L{objects.Disk}
  @param disk: the disk object to create
  @type children: list of L{bdev.BlockDev}
  @param children: the list of block devices that are children of the device
                  represented by the disk parameter
  @type excl_stor: boolean
  @param excl_stor: Whether exclusive_storage is active
  @return: the newly created device

  """
  # Validate both the type and the parameter set before creating
  _VerifyDiskType(disk.dev_type)
  _VerifyDiskParams(disk)
  dev_class = DEV_MAP[disk.dev_type]
  return dev_class.Create(disk.physical_id, children, disk.size,
                          disk.params, excl_stor)