Revision ce9283c1

b/Makefile.am
@@ -41,6 +41,7 @@
 # the directory + 'dir' suffix
 clientdir = $(pkgpythondir)/client
 hypervisordir = $(pkgpythondir)/hypervisor
+blockdir = $(pkgpythondir)/block
 httpdir = $(pkgpythondir)/http
 masterddir = $(pkgpythondir)/masterd
 confddir = $(pkgpythondir)/confd
@@ -108,6 +109,7 @@
 	lib/confd \
 	lib/http \
 	lib/hypervisor \
+	lib/block \
 	lib/impexpd \
 	lib/masterd \
 	lib/rapi \
@@ -257,7 +259,6 @@
 	lib/__init__.py \
 	lib/asyncnotifier.py \
 	lib/backend.py \
-	lib/bdev.py \
 	lib/bootstrap.py \
 	lib/cli.py \
 	lib/cmdlib.py \
@@ -314,6 +315,10 @@
 	lib/hypervisor/hv_lxc.py \
 	lib/hypervisor/hv_xen.py
 
+block_PYTHON = \
+	lib/block/__init__.py \
+	lib/block/bdev.py
+
 rapi_PYTHON = \
 	lib/rapi/__init__.py \
 	lib/rapi/baserlib.py \
@@ -1112,7 +1117,7 @@
 	test/py/ganeti.asyncnotifier_unittest.py \
 	test/py/ganeti.backend_unittest-runasroot.py \
 	test/py/ganeti.backend_unittest.py \
-	test/py/ganeti.bdev_unittest.py \
+	test/py/ganeti.block.bdev_unittest.py \
 	test/py/ganeti.cli_unittest.py \
 	test/py/ganeti.client.gnt_cluster_unittest.py \
 	test/py/ganeti.client.gnt_instance_unittest.py \
@@ -1232,6 +1237,7 @@
 	$(pkgpython_PYTHON) \
 	$(client_PYTHON) \
 	$(hypervisor_PYTHON) \
+	$(block_PYTHON) \
 	$(rapi_PYTHON) \
 	$(server_PYTHON) \
 	$(pytools_PYTHON) \
b/lib/backend.py
@@ -54,7 +54,7 @@
 from ganeti import ssh
 from ganeti import hypervisor
 from ganeti import constants
-from ganeti import bdev
+from ganeti.block import bdev
 from ganeti import objects
 from ganeti import ssconf
 from ganeti import serializer
b/lib/block/bdev.py (new file)
#
#

# Copyright (C) 2006, 2007, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Block device abstraction"""

import re
import time
import errno
import shlex
import stat
import pyparsing as pyp
import os
import logging
import math

from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import objects
from ganeti import compat
from ganeti import netutils
from ganeti import pathutils
from ganeti import serializer


# Size of reads in _CanReadDevice
_DEVICE_READ_SIZE = 128 * 1024


class RbdShowmappedJsonError(Exception):
  """`rbd showmapped' JSON formatting error Exception class.

  """
  pass


def _IgnoreError(fn, *args, **kwargs):
  """Executes the given function, ignoring BlockDeviceErrors.

  This is used in order to simplify the execution of cleanup or
  rollback functions.

  @rtype: boolean
  @return: True when fn didn't raise an exception, False otherwise

  """
  try:
    fn(*args, **kwargs)
    return True
  except errors.BlockDeviceError, err:
    logging.warning("Caught BlockDeviceError but ignoring: %s", str(err))
    return False


def _ThrowError(msg, *args):
  """Log an error to the node daemon and then raise an exception.

  @type msg: string
  @param msg: the text of the exception
  @raise errors.BlockDeviceError

  """
  if args:
    msg = msg % args
  logging.error(msg)
  raise errors.BlockDeviceError(msg)


def _CheckResult(result):
  """Throws an error if the given result is a failed one.

  @param result: result from RunCmd

  """
  if result.failed:
    _ThrowError("Command: %s error: %s - %s", result.cmd, result.fail_reason,
                result.output)


def _CanReadDevice(path):
  """Check if we can read from the given device.

  This tries to read the first 128k of the device.

  """
  try:
    utils.ReadFile(path, size=_DEVICE_READ_SIZE)
    return True
  except EnvironmentError:
    logging.warning("Can't read from device %s", path, exc_info=True)
    return False
110

  
111

  
112
def _GetForbiddenFileStoragePaths():
113
  """Builds a list of path prefixes which shouldn't be used for file storage.
114

  
115
  @rtype: frozenset
116

  
117
  """
118
  paths = set([
119
    "/boot",
120
    "/dev",
121
    "/etc",
122
    "/home",
123
    "/proc",
124
    "/root",
125
    "/sys",
126
    ])
127

  
128
  for prefix in ["", "/usr", "/usr/local"]:
129
    paths.update(map(lambda s: "%s/%s" % (prefix, s),
130
                     ["bin", "lib", "lib32", "lib64", "sbin"]))
131

  
132
  return compat.UniqueFrozenset(map(os.path.normpath, paths))
133

  
134

  
135
def _ComputeWrongFileStoragePaths(paths,
136
                                  _forbidden=_GetForbiddenFileStoragePaths()):
137
  """Cross-checks a list of paths for prefixes considered bad.
138

  
139
  Some paths, e.g. "/bin", should not be used for file storage.
140

  
141
  @type paths: list
142
  @param paths: List of paths to be checked
143
  @rtype: list
144
  @return: Sorted list of paths for which the user should be warned
145

  
146
  """
147
  def _Check(path):
148
    return (not os.path.isabs(path) or
149
            path in _forbidden or
150
            filter(lambda p: utils.IsBelowDir(p, path), _forbidden))
151

  
152
  return utils.NiceSort(filter(_Check, map(os.path.normpath, paths)))
153

  
154

  
155
def ComputeWrongFileStoragePaths(_filename=pathutils.FILE_STORAGE_PATHS_FILE):
156
  """Returns a list of file storage paths whose prefix is considered bad.
157

  
158
  See L{_ComputeWrongFileStoragePaths}.
159

  
160
  """
161
  return _ComputeWrongFileStoragePaths(_LoadAllowedFileStoragePaths(_filename))
162

  
163

  
164
def _CheckFileStoragePath(path, allowed):
165
  """Checks if a path is in a list of allowed paths for file storage.
166

  
167
  @type path: string
168
  @param path: Path to check
169
  @type allowed: list
170
  @param allowed: List of allowed paths
171
  @raise errors.FileStoragePathError: If the path is not allowed
172

  
173
  """
174
  if not os.path.isabs(path):
175
    raise errors.FileStoragePathError("File storage path must be absolute,"
176
                                      " got '%s'" % path)
177

  
178
  for i in allowed:
179
    if not os.path.isabs(i):
180
      logging.info("Ignoring relative path '%s' for file storage", i)
181
      continue
182

  
183
    if utils.IsBelowDir(i, path):
184
      break
185
  else:
186
    raise errors.FileStoragePathError("Path '%s' is not acceptable for file"
187
                                      " storage" % path)
188

  
189

  
190
def _LoadAllowedFileStoragePaths(filename):
191
  """Loads file containing allowed file storage paths.
192

  
193
  @rtype: list
194
  @return: List of allowed paths (can be an empty list)
195

  
196
  """
197
  try:
198
    contents = utils.ReadFile(filename)
199
  except EnvironmentError:
200
    return []
201
  else:
202
    return utils.FilterEmptyLinesAndComments(contents)
203

  
204

  
205
def CheckFileStoragePath(path, _filename=pathutils.FILE_STORAGE_PATHS_FILE):
206
  """Checks if a path is allowed for file storage.
207

  
208
  @type path: string
209
  @param path: Path to check
210
  @raise errors.FileStoragePathError: If the path is not allowed
211

  
212
  """
213
  allowed = _LoadAllowedFileStoragePaths(_filename)
214

  
215
  if _ComputeWrongFileStoragePaths([path]):
216
    raise errors.FileStoragePathError("Path '%s' uses a forbidden prefix" %
217
                                      path)
218

  
219
  _CheckFileStoragePath(path, allowed)
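# Illustrative sketch (not part of this revision): how the checks above
# compose, assuming "/srv/ganeti/file-storage" is a hypothetical entry in
# the allowed-paths file:
#
#   CheckFileStoragePath("/srv/ganeti/file-storage/inst1.disk0")  # accepted
#   CheckFileStoragePath("/etc/ganeti/x")      # forbidden prefix, raises
#   CheckFileStoragePath("some/relative/dir")  # not acceptable, raises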
220

  
221

  
222
class BlockDev(object):
223
  """Block device abstract class.
224

  
225
  A block device can be in the following states:
226
    - not existing on the system, and by `Create()` it goes into:
227
    - existing but not setup/not active, and by `Assemble()` goes into:
228
    - active read-write and by `Open()` it goes into
229
    - online (=used, or ready for use)
230

  
231
  A device can also be online but read-only, however we are not using
232
  the readonly state (LV has it, if needed in the future) and we are
233
  usually looking at this like a stack, so it's easier to
234
  conceptualise the transition from not-existing to online and back
235
  like a linear one.
236

  
237
  The many different states of the device are due to the fact that we
238
  need to cover many device types:
239
    - logical volumes are created, lvchange -a y $lv, and used
240
    - drbd devices are attached to a local disk/remote peer and made primary
241

  
242
  A block device is identified by three items:
243
    - the /dev path of the device (dynamic)
244
    - a unique ID of the device (static)
245
    - its major/minor pair (dynamic)
246

  
247
  Not all devices implement both the first two as distinct items. LVM
248
  logical volumes have their unique ID (the pair volume group, logical
249
  volume name) in a 1-to-1 relation to the dev path. For DRBD devices,
250
  the /dev path is again dynamic and the unique id is the pair (host1,
251
  dev1), (host2, dev2).
252

  
253
  You can get to a device in two ways:
254
    - creating the (real) device, which returns you
255
      an attached instance (lvcreate)
256
    - attaching of a python instance to an existing (real) device
257

  
258
  The second point, the attachment to a device, is different
259
  depending on whether the device is assembled or not. At init() time,
260
  we search for a device with the same unique_id as us. If found,
261
  good. It also means that the device is already assembled. If not,
262
  after assembly we'll have our correct major/minor.
263

  
264
  """
265
  def __init__(self, unique_id, children, size, params):
266
    self._children = children
267
    self.dev_path = None
268
    self.unique_id = unique_id
269
    self.major = None
270
    self.minor = None
271
    self.attached = False
272
    self.size = size
273
    self.params = params
274

  
275
  def Assemble(self):
276
    """Assemble the device from its components.
277

  
278
    Implementations of this method by child classes must ensure that:
279
      - after the device has been assembled, it knows its major/minor
280
        numbers; this allows other devices (usually parents) to probe
281
        correctly for their children
282
      - calling this method on an existing, in-use device is safe
283
      - if the device is already configured (and in an OK state),
284
        this method is idempotent
285

  
286
    """
287
    pass
288

  
289
  def Attach(self):
290
    """Find a device which matches our config and attach to it.
291

  
292
    """
293
    raise NotImplementedError
294

  
295
  def Close(self):
296
    """Notifies that the device will no longer be used for I/O.
297

  
298
    """
299
    raise NotImplementedError
300

  
301
  @classmethod
302
  def Create(cls, unique_id, children, size, params, excl_stor):
303
    """Create the device.
304

  
305
    If the device cannot be created, it will return None
306
    instead. Error messages go to the logging system.
307

  
308
    Note that for some devices, the unique_id is used, and for other,
309
    the children. The idea is that these two, taken together, are
310
    enough for both creation and assembly (later).
311

  
312
    """
313
    raise NotImplementedError
314

  
315
  def Remove(self):
316
    """Remove this device.
317

  
318
    This makes sense only for some of the device types: LV and file
319
    storage. Also note that if the device can't attach, the removal
320
    can't be completed.
321

  
322
    """
323
    raise NotImplementedError
324

  
325
  def Rename(self, new_id):
326
    """Rename this device.
327

  
328
    This may or may not make sense for a given device type.
329

  
330
    """
331
    raise NotImplementedError
332

  
333
  def Open(self, force=False):
334
    """Make the device ready for use.
335

  
336
    This makes the device ready for I/O. For now, just the DRBD
337
    devices need this.
338

  
339
    The force parameter signifies that if the device has any kind of
340
    --force thing, it should be used, we know what we are doing.
341

  
342
    """
343
    raise NotImplementedError
344

  
345
  def Shutdown(self):
346
    """Shut down the device, freeing its children.
347

  
348
    This undoes the `Assemble()` work, except for the child
349
    assembling; as such, the children on the device are still
350
    assembled after this call.
351

  
352
    """
353
    raise NotImplementedError
354

  
355
  def SetSyncParams(self, params):
356
    """Adjust the synchronization parameters of the mirror.
357

  
358
    In case this is not a mirroring device, this is a no-op.
359

  
360
    @param params: dictionary of LD level disk parameters related to the
361
    synchronization.
362
    @rtype: list
363
    @return: a list of error messages, emitted both by the current node and by
364
    children. An empty list means no errors.
365

  
366
    """
367
    result = []
368
    if self._children:
369
      for child in self._children:
370
        result.extend(child.SetSyncParams(params))
371
    return result
372

  
373
  def PauseResumeSync(self, pause):
374
    """Pause/Resume the sync of the mirror.
375

  
376
    In case this is not a mirroring device, this is a no-op.
377

  
378
    @param pause: Whether to pause or resume
379

  
380
    """
381
    result = True
382
    if self._children:
383
      for child in self._children:
384
        result = result and child.PauseResumeSync(pause)
385
    return result
386

  
387
  def GetSyncStatus(self):
388
    """Returns the sync status of the device.
389

  
390
    If this device is a mirroring device, this function returns the
391
    status of the mirror.
392

  
393
    If sync_percent is None, it means the device is not syncing.
394

  
395
    If estimated_time is None, it means we can't estimate
396
    the time needed, otherwise it's the time left in seconds.
397

  
398
    If is_degraded is True, it means the device is missing
399
    redundancy. This is usually a sign that something went wrong in
400
    the device setup, if sync_percent is None.
401

  
402
    The ldisk parameter represents the degradation of the local
403
    data. This is only valid for some devices, the rest will always
404
    return False (not degraded).
405

  
406
    @rtype: objects.BlockDevStatus
407

  
408
    """
409
    return objects.BlockDevStatus(dev_path=self.dev_path,
410
                                  major=self.major,
411
                                  minor=self.minor,
412
                                  sync_percent=None,
413
                                  estimated_time=None,
414
                                  is_degraded=False,
415
                                  ldisk_status=constants.LDS_OKAY)
416

  
417
  def CombinedSyncStatus(self):
418
    """Calculate the mirror status recursively for our children.
419

  
420
    The return value is the same as for `GetSyncStatus()` except the
421
    minimum percent and maximum time are calculated across our
422
    children.
423

  
424
    @rtype: objects.BlockDevStatus
425

  
426
    """
427
    status = self.GetSyncStatus()
428

  
429
    min_percent = status.sync_percent
430
    max_time = status.estimated_time
431
    is_degraded = status.is_degraded
432
    ldisk_status = status.ldisk_status
433

  
434
    if self._children:
435
      for child in self._children:
436
        child_status = child.GetSyncStatus()
437

  
438
        if min_percent is None:
439
          min_percent = child_status.sync_percent
440
        elif child_status.sync_percent is not None:
441
          min_percent = min(min_percent, child_status.sync_percent)
442

  
443
        if max_time is None:
444
          max_time = child_status.estimated_time
445
        elif child_status.estimated_time is not None:
446
          max_time = max(max_time, child_status.estimated_time)
447

  
448
        is_degraded = is_degraded or child_status.is_degraded
449

  
450
        if ldisk_status is None:
451
          ldisk_status = child_status.ldisk_status
452
        elif child_status.ldisk_status is not None:
453
          ldisk_status = max(ldisk_status, child_status.ldisk_status)
454

  
455
    return objects.BlockDevStatus(dev_path=self.dev_path,
456
                                  major=self.major,
457
                                  minor=self.minor,
458
                                  sync_percent=min_percent,
459
                                  estimated_time=max_time,
460
                                  is_degraded=is_degraded,
461
                                  ldisk_status=ldisk_status)
462

  
463
  def SetInfo(self, text):
464
    """Update metadata with info text.
465

  
466
    Only supported for some device types.
467

  
468
    """
469
    for child in self._children:
470
      child.SetInfo(text)
471

  
472
  def Grow(self, amount, dryrun, backingstore):
473
    """Grow the block device.
474

  
475
    @type amount: integer
476
    @param amount: the amount (in mebibytes) to grow with
477
    @type dryrun: boolean
478
    @param dryrun: whether to execute the operation in simulation mode
479
        only, without actually increasing the size
480
    @param backingstore: whether to execute the operation on backing storage
481
        only, or on "logical" storage only; e.g. DRBD is logical storage,
482
        whereas LVM, file, RBD are backing storage
483

  
484
    """
485
    raise NotImplementedError
486

  
487
  def GetActualSize(self):
488
    """Return the actual disk size.
489

  
490
    @note: the device needs to be active when this is called
491

  
492
    """
493
    assert self.attached, "BlockDevice not attached in GetActualSize()"
494
    result = utils.RunCmd(["blockdev", "--getsize64", self.dev_path])
495
    if result.failed:
496
      _ThrowError("blockdev failed (%s): %s",
497
                  result.fail_reason, result.output)
498
    try:
499
      sz = int(result.output.strip())
500
    except (ValueError, TypeError), err:
501
      _ThrowError("Failed to parse blockdev output: %s", str(err))
502
    return sz
503

  
504
  def __repr__(self):
505
    return ("<%s: unique_id: %s, children: %s, %s:%s, %s>" %
506
            (self.__class__, self.unique_id, self._children,
507
             self.major, self.minor, self.dev_path))
508

  
509

  
510
class LogicalVolume(BlockDev):
511
  """Logical Volume block device.
512

  
513
  """
514
  _VALID_NAME_RE = re.compile("^[a-zA-Z0-9+_.-]*$")
515
  _INVALID_NAMES = compat.UniqueFrozenset([".", "..", "snapshot", "pvmove"])
516
  _INVALID_SUBSTRINGS = compat.UniqueFrozenset(["_mlog", "_mimage"])
517

  
518
  def __init__(self, unique_id, children, size, params):
519
    """Attaches to a LV device.
520

  
521
    The unique_id is a tuple (vg_name, lv_name)
522

  
523
    """
524
    super(LogicalVolume, self).__init__(unique_id, children, size, params)
525
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
526
      raise ValueError("Invalid configuration data %s" % str(unique_id))
527
    self._vg_name, self._lv_name = unique_id
528
    self._ValidateName(self._vg_name)
529
    self._ValidateName(self._lv_name)
530
    self.dev_path = utils.PathJoin("/dev", self._vg_name, self._lv_name)
531
    self._degraded = True
532
    self.major = self.minor = self.pe_size = self.stripe_count = None
533
    self.Attach()
534

  
535
  @staticmethod
536
  def _GetStdPvSize(pvs_info):
537
    """Return the the standard PV size (used with exclusive storage).
538

  
539
    @param pvs_info: list of objects.LvmPvInfo, cannot be empty
540
    @rtype: float
541
    @return: size in MiB
542

  
543
    """
544
    assert len(pvs_info) > 0
545
    smallest = min([pv.size for pv in pvs_info])
546
    return smallest / (1 + constants.PART_MARGIN + constants.PART_RESERVED)
547

  
548
  @staticmethod
549
  def _ComputeNumPvs(size, pvs_info):
550
    """Compute the number of PVs needed for an LV (with exclusive storage).
551

  
552
    @type size: float
553
    @param size: LV size in MiB
554
    @param pvs_info: list of objects.LvmPvInfo, cannot be empty
555
    @rtype: integer
556
    @return: number of PVs needed
557
    """
558
    assert len(pvs_info) > 0
559
    pv_size = float(LogicalVolume._GetStdPvSize(pvs_info))
560
    return int(math.ceil(float(size) / pv_size))
561

  
562
  @staticmethod
563
  def _GetEmptyPvNames(pvs_info, max_pvs=None):
564
    """Return a list of empty PVs, by name.
565

  
566
    """
567
    empty_pvs = filter(objects.LvmPvInfo.IsEmpty, pvs_info)
568
    if max_pvs is not None:
569
      empty_pvs = empty_pvs[:max_pvs]
570
    return map((lambda pv: pv.name), empty_pvs)
571

  
572
  @classmethod
573
  def Create(cls, unique_id, children, size, params, excl_stor):
574
    """Create a new logical volume.
575

  
576
    """
577
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
578
      raise errors.ProgrammerError("Invalid configuration data %s" %
579
                                   str(unique_id))
580
    vg_name, lv_name = unique_id
581
    cls._ValidateName(vg_name)
582
    cls._ValidateName(lv_name)
583
    pvs_info = cls.GetPVInfo([vg_name])
584
    if not pvs_info:
585
      if excl_stor:
586
        msg = "No (empty) PVs found"
587
      else:
588
        msg = "Can't compute PV info for vg %s" % vg_name
589
      _ThrowError(msg)
590
    pvs_info.sort(key=(lambda pv: pv.free), reverse=True)
591

  
592
    pvlist = [pv.name for pv in pvs_info]
593
    if compat.any(":" in v for v in pvlist):
594
      _ThrowError("Some of your PVs have the invalid character ':' in their"
595
                  " name, this is not supported - please filter them out"
596
                  " in lvm.conf using either 'filter' or 'preferred_names'")
597

  
598
    current_pvs = len(pvlist)
599
    desired_stripes = params[constants.LDP_STRIPES]
600
    stripes = min(current_pvs, desired_stripes)
601

  
602
    if excl_stor:
603
      (err_msgs, _) = utils.LvmExclusiveCheckNodePvs(pvs_info)
604
      if err_msgs:
605
        for m in err_msgs:
606
          logging.warning(m)
607
      req_pvs = cls._ComputeNumPvs(size, pvs_info)
608
      pvlist = cls._GetEmptyPvNames(pvs_info, req_pvs)
609
      current_pvs = len(pvlist)
610
      if current_pvs < req_pvs:
611
        _ThrowError("Not enough empty PVs to create a disk of %d MB:"
612
                    " %d available, %d needed", size, current_pvs, req_pvs)
613
      assert current_pvs == len(pvlist)
614
      if stripes > current_pvs:
615
        # No warning issued for this, as it's no surprise
616
        stripes = current_pvs
617

  
618
    else:
619
      if stripes < desired_stripes:
620
        logging.warning("Could not use %d stripes for VG %s, as only %d PVs are"
621
                        " available.", desired_stripes, vg_name, current_pvs)
622
      free_size = sum([pv.free for pv in pvs_info])
623
      # The size constraint should have been checked from the master before
624
      # calling the create function.
625
      if free_size < size:
626
        _ThrowError("Not enough free space: required %s,"
627
                    " available %s", size, free_size)
628

  
629
    # If the free space is not well distributed, we won't be able to
630
    # create an optimally-striped volume; in that case, we want to try
631
    # with N, N-1, ..., 2, and finally 1 (non-stripped) number of
632
    # stripes
633
    cmd = ["lvcreate", "-L%dm" % size, "-n%s" % lv_name]
634
    for stripes_arg in range(stripes, 0, -1):
635
      result = utils.RunCmd(cmd + ["-i%d" % stripes_arg] + [vg_name] + pvlist)
636
      if not result.failed:
637
        break
638
    if result.failed:
639
      _ThrowError("LV create failed (%s): %s",
640
                  result.fail_reason, result.output)
641
    return LogicalVolume(unique_id, children, size, params)
642

  
643
  @staticmethod
644
  def _GetVolumeInfo(lvm_cmd, fields):
645
    """Returns LVM Volume infos using lvm_cmd
646

  
647
    @param lvm_cmd: Should be one of "pvs", "vgs" or "lvs"
648
    @param fields: Fields to return
649
    @return: A list of dicts each with the parsed fields
650

  
651
    """
652
    if not fields:
653
      raise errors.ProgrammerError("No fields specified")
654

  
655
    sep = "|"
656
    cmd = [lvm_cmd, "--noheadings", "--nosuffix", "--units=m", "--unbuffered",
657
           "--separator=%s" % sep, "-o%s" % ",".join(fields)]
658

  
659
    result = utils.RunCmd(cmd)
660
    if result.failed:
661
      raise errors.CommandError("Can't get the volume information: %s - %s" %
662
                                (result.fail_reason, result.output))
663

  
664
    data = []
665
    for line in result.stdout.splitlines():
666
      splitted_fields = line.strip().split(sep)
667

  
668
      if len(fields) != len(splitted_fields):
669
        raise errors.CommandError("Can't parse %s output: line '%s'" %
670
                                  (lvm_cmd, line))
671

  
672
      data.append(splitted_fields)
673

  
674
    return data
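    # Illustrative sketch (not part of this revision): for example,
    # _GetVolumeInfo("vgs", ["vg_name", "vg_free"]) runs roughly
    #   vgs --noheadings --nosuffix --units=m --unbuffered --separator=| \
    #       -ovg_name,vg_free
    # and returns a list of per-line field lists such as
    # [["xenvg", "20480.00"]] ("xenvg" being a hypothetical VG name).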
675

  
676
  @classmethod
677
  def GetPVInfo(cls, vg_names, filter_allocatable=True, include_lvs=False):
678
    """Get the free space info for PVs in a volume group.
679

  
680
    @param vg_names: list of volume group names, if empty all will be returned
681
    @param filter_allocatable: whether to skip over unallocatable PVs
682
    @param include_lvs: whether to include a list of LVs hosted on each PV
683

  
684
    @rtype: list
685
    @return: list of objects.LvmPvInfo objects
686

  
687
    """
688
    # We request "lv_name" field only if we care about LVs, so we don't get
689
    # a long list of entries with many duplicates unless we really have to.
690
    # The duplicate "pv_name" field will be ignored.
691
    if include_lvs:
692
      lvfield = "lv_name"
693
    else:
694
      lvfield = "pv_name"
695
    try:
696
      info = cls._GetVolumeInfo("pvs", ["pv_name", "vg_name", "pv_free",
697
                                        "pv_attr", "pv_size", lvfield])
698
    except errors.GenericError, err:
699
      logging.error("Can't get PV information: %s", err)
700
      return None
701

  
702
    # When asked for LVs, "pvs" may return multiple entries for the same PV-LV
703
    # pair. We sort entries by PV name and then LV name, so it's easy to weed
704
    # out duplicates.
705
    if include_lvs:
706
      info.sort(key=(lambda i: (i[0], i[5])))
707
    data = []
708
    lastpvi = None
709
    for (pv_name, vg_name, pv_free, pv_attr, pv_size, lv_name) in info:
710
      # (possibly) skip over pvs which are not allocatable
711
      if filter_allocatable and pv_attr[0] != "a":
712
        continue
713
      # (possibly) skip over pvs which are not in the right volume group(s)
714
      if vg_names and vg_name not in vg_names:
715
        continue
716
      # Beware of duplicates (check before inserting)
717
      if lastpvi and lastpvi.name == pv_name:
718
        if include_lvs and lv_name:
719
          if not lastpvi.lv_list or lastpvi.lv_list[-1] != lv_name:
720
            lastpvi.lv_list.append(lv_name)
721
      else:
722
        if include_lvs and lv_name:
723
          lvl = [lv_name]
724
        else:
725
          lvl = []
726
        lastpvi = objects.LvmPvInfo(name=pv_name, vg_name=vg_name,
727
                                    size=float(pv_size), free=float(pv_free),
728
                                    attributes=pv_attr, lv_list=lvl)
729
        data.append(lastpvi)
730

  
731
    return data
732

  
733
  @classmethod
734
  def _GetExclusiveStorageVgFree(cls, vg_name):
735
    """Return the free disk space in the given VG, in exclusive storage mode.
736

  
737
    @type vg_name: string
738
    @param vg_name: VG name
739
    @rtype: float
740
    @return: free space in MiB
741
    """
742
    pvs_info = cls.GetPVInfo([vg_name])
743
    if not pvs_info:
744
      return 0.0
745
    pv_size = cls._GetStdPvSize(pvs_info)
746
    num_pvs = len(cls._GetEmptyPvNames(pvs_info))
747
    return pv_size * num_pvs
748

  
749
  @classmethod
750
  def GetVGInfo(cls, vg_names, excl_stor, filter_readonly=True):
751
    """Get the free space info for specific VGs.
752

  
753
    @param vg_names: list of volume group names, if empty all will be returned
754
    @param excl_stor: whether exclusive_storage is enabled
755
    @param filter_readonly: whether to skip over readonly VGs
756

  
757
    @rtype: list
758
    @return: list of tuples (free_space, total_size, name) with free_space in
759
             MiB
760

  
761
    """
762
    try:
763
      info = cls._GetVolumeInfo("vgs", ["vg_name", "vg_free", "vg_attr",
764
                                        "vg_size"])
765
    except errors.GenericError, err:
766
      logging.error("Can't get VG information: %s", err)
767
      return None
768

  
769
    data = []
770
    for vg_name, vg_free, vg_attr, vg_size in info:
771
      # (possibly) skip over vgs which are not writable
772
      if filter_readonly and vg_attr[0] == "r":
773
        continue
774
      # (possibly) skip over vgs which are not in the right volume group(s)
775
      if vg_names and vg_name not in vg_names:
776
        continue
777
      # Exclusive storage needs a different concept of free space
778
      if excl_stor:
779
        es_free = cls._GetExclusiveStorageVgFree(vg_name)
780
        assert es_free <= vg_free
781
        vg_free = es_free
782
      data.append((float(vg_free), float(vg_size), vg_name))
783

  
784
    return data
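    # Illustrative sketch (not part of this revision): with excl_stor=True the
    # free figure reported above is not the raw vg_free but
    # _GetStdPvSize(pvs) * number-of-empty-PVs; e.g. four PVs of ~100 MiB of
    # which only two are still empty report roughly two standard PV sizes as
    # free space, not the sum of the per-PV free space.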
785

  
786
  @classmethod
787
  def _ValidateName(cls, name):
788
    """Validates that a given name is valid as VG or LV name.
789

  
790
    The list of valid characters and restricted names is taken out of
791
    the lvm(8) manpage, with the simplification that we enforce both
792
    VG and LV restrictions on the names.
793

  
794
    """
795
    if (not cls._VALID_NAME_RE.match(name) or
796
        name in cls._INVALID_NAMES or
797
        compat.any(substring in name for substring in cls._INVALID_SUBSTRINGS)):
798
      _ThrowError("Invalid LVM name '%s'", name)
799

  
800
  def Remove(self):
801
    """Remove this logical volume.
802

  
803
    """
804
    if not self.minor and not self.Attach():
805
      # the LV does not exist
806
      return
807
    result = utils.RunCmd(["lvremove", "-f", "%s/%s" %
808
                           (self._vg_name, self._lv_name)])
809
    if result.failed:
810
      _ThrowError("Can't lvremove: %s - %s", result.fail_reason, result.output)
811

  
812
  def Rename(self, new_id):
813
    """Rename this logical volume.
814

  
815
    """
816
    if not isinstance(new_id, (tuple, list)) or len(new_id) != 2:
817
      raise errors.ProgrammerError("Invalid new logical id '%s'" % new_id)
818
    new_vg, new_name = new_id
819
    if new_vg != self._vg_name:
820
      raise errors.ProgrammerError("Can't move a logical volume across"
821
                                   " volume groups (from %s to to %s)" %
822
                                   (self._vg_name, new_vg))
823
    result = utils.RunCmd(["lvrename", new_vg, self._lv_name, new_name])
824
    if result.failed:
825
      _ThrowError("Failed to rename the logical volume: %s", result.output)
826
    self._lv_name = new_name
827
    self.dev_path = utils.PathJoin("/dev", self._vg_name, self._lv_name)
828

  
829
  def Attach(self):
830
    """Attach to an existing LV.
831

  
832
    This method will try to see if an existing and active LV exists
833
    which matches our name. If so, its major/minor will be
834
    recorded.
835

  
836
    """
837
    self.attached = False
838
    result = utils.RunCmd(["lvs", "--noheadings", "--separator=,",
839
                           "--units=m", "--nosuffix",
840
                           "-olv_attr,lv_kernel_major,lv_kernel_minor,"
841
                           "vg_extent_size,stripes", self.dev_path])
842
    if result.failed:
843
      logging.error("Can't find LV %s: %s, %s",
844
                    self.dev_path, result.fail_reason, result.output)
845
      return False
846
    # the output can (and will) have multiple lines for multi-segment
847
    # LVs, as the 'stripes' parameter is a segment one, so we take
848
    # only the last entry, which is the one we're interested in; note
849
    # that with LVM2 anyway the 'stripes' value must be constant
850
    # across segments, so this is a no-op actually
851
    out = result.stdout.splitlines()
852
    if not out: # totally empty result? splitlines() returns at least
853
                # one line for any non-empty string
854
      logging.error("Can't parse LVS output, no lines? Got '%s'", str(out))
855
      return False
856
    out = out[-1].strip().rstrip(",")
857
    out = out.split(",")
858
    if len(out) != 5:
859
      logging.error("Can't parse LVS output, len(%s) != 5", str(out))
860
      return False
861

  
862
    status, major, minor, pe_size, stripes = out
863
    if len(status) < 6:
864
      logging.error("lvs lv_attr is not at least 6 characters (%s)", status)
865
      return False
866

  
867
    try:
868
      major = int(major)
869
      minor = int(minor)
870
    except (TypeError, ValueError), err:
871
      logging.error("lvs major/minor cannot be parsed: %s", str(err))
872

  
873
    try:
874
      pe_size = int(float(pe_size))
875
    except (TypeError, ValueError), err:
876
      logging.error("Can't parse vg extent size: %s", err)
877
      return False
878

  
879
    try:
880
      stripes = int(stripes)
881
    except (TypeError, ValueError), err:
882
      logging.error("Can't parse the number of stripes: %s", err)
883
      return False
884

  
885
    self.major = major
886
    self.minor = minor
887
    self.pe_size = pe_size
888
    self.stripe_count = stripes
889
    self._degraded = status[0] == "v" # virtual volume, i.e. has no backing
890
                                      # storage
891
    self.attached = True
892
    return True
893

  
894
  def Assemble(self):
895
    """Assemble the device.
896

  
897
    We always run `lvchange -ay` on the LV to ensure it's active before
898
    use, as there were cases when xenvg was not active after boot
899
    (also possibly after disk issues).
900

  
901
    """
902
    result = utils.RunCmd(["lvchange", "-ay", self.dev_path])
903
    if result.failed:
904
      _ThrowError("Can't activate lv %s: %s", self.dev_path, result.output)
905

  
906
  def Shutdown(self):
907
    """Shutdown the device.
908

  
909
    This is a no-op for the LV device type, as we don't deactivate the
910
    volumes on shutdown.
911

  
912
    """
913
    pass
914

  
915
  def GetSyncStatus(self):
916
    """Returns the sync status of the device.
917

  
918
    If this device is a mirroring device, this function returns the
919
    status of the mirror.
920

  
921
    For logical volumes, sync_percent and estimated_time are always
922
    None (no recovery in progress, as we don't handle the mirrored LV
923
    case). The is_degraded parameter is the inverse of the ldisk
924
    parameter.
925

  
926
    For the ldisk parameter, we check if the logical volume has the
927
    'virtual' type, which means it's not backed by existing storage
928
    anymore (reads from it return I/O errors). This happens after a
929
    physical disk failure and subsequent 'vgreduce --removemissing' on
930
    the volume group.
931

  
932
    The status was already read in Attach, so we just return it.
933

  
934
    @rtype: objects.BlockDevStatus
935

  
936
    """
937
    if self._degraded:
938
      ldisk_status = constants.LDS_FAULTY
939
    else:
940
      ldisk_status = constants.LDS_OKAY
941

  
942
    return objects.BlockDevStatus(dev_path=self.dev_path,
943
                                  major=self.major,
944
                                  minor=self.minor,
945
                                  sync_percent=None,
946
                                  estimated_time=None,
947
                                  is_degraded=self._degraded,
948
                                  ldisk_status=ldisk_status)
949

  
950
  def Open(self, force=False):
951
    """Make the device ready for I/O.
952

  
953
    This is a no-op for the LV device type.
954

  
955
    """
956
    pass
957

  
958
  def Close(self):
959
    """Notifies that the device will no longer be used for I/O.
960

  
961
    This is a no-op for the LV device type.
962

  
963
    """
964
    pass
965

  
966
  def Snapshot(self, size):
967
    """Create a snapshot copy of an lvm block device.
968

  
969
    @returns: tuple (vg, lv)
970

  
971
    """
972
    snap_name = self._lv_name + ".snap"
973

  
974
    # remove existing snapshot if found
975
    snap = LogicalVolume((self._vg_name, snap_name), None, size, self.params)
976
    _IgnoreError(snap.Remove)
977

  
978
    vg_info = self.GetVGInfo([self._vg_name], False)
979
    if not vg_info:
980
      _ThrowError("Can't compute VG info for vg %s", self._vg_name)
981
    free_size, _, _ = vg_info[0]
982
    if free_size < size:
983
      _ThrowError("Not enough free space: required %s,"
984
                  " available %s", size, free_size)
985

  
986
    _CheckResult(utils.RunCmd(["lvcreate", "-L%dm" % size, "-s",
987
                               "-n%s" % snap_name, self.dev_path]))
988

  
989
    return (self._vg_name, snap_name)
990

  
991
  def _RemoveOldInfo(self):
992
    """Try to remove old tags from the lv.
993

  
994
    """
995
    result = utils.RunCmd(["lvs", "-o", "tags", "--noheadings", "--nosuffix",
996
                           self.dev_path])
997
    _CheckResult(result)
998

  
999
    raw_tags = result.stdout.strip()
1000
    if raw_tags:
1001
      for tag in raw_tags.split(","):
1002
        _CheckResult(utils.RunCmd(["lvchange", "--deltag",
1003
                                   tag.strip(), self.dev_path]))
1004

  
1005
  def SetInfo(self, text):
1006
    """Update metadata with info text.
1007

  
1008
    """
1009
    BlockDev.SetInfo(self, text)
1010

  
1011
    self._RemoveOldInfo()
1012

  
1013
    # Replace invalid characters
1014
    text = re.sub("^[^A-Za-z0-9_+.]", "_", text)
1015
    text = re.sub("[^-A-Za-z0-9_+.]", "_", text)
1016

  
1017
    # Only up to 128 characters are allowed
1018
    text = text[:128]
1019

  
1020
    _CheckResult(utils.RunCmd(["lvchange", "--addtag", text, self.dev_path]))
1021

  
1022
  def Grow(self, amount, dryrun, backingstore):
1023
    """Grow the logical volume.
1024

  
1025
    """
1026
    if not backingstore:
1027
      return
1028
    if self.pe_size is None or self.stripe_count is None:
1029
      if not self.Attach():
1030
        _ThrowError("Can't attach to LV during Grow()")
1031
    full_stripe_size = self.pe_size * self.stripe_count
1032
    rest = amount % full_stripe_size
1033
    if rest != 0:
1034
      amount += full_stripe_size - rest
1035
    cmd = ["lvextend", "-L", "+%dm" % amount]
1036
    if dryrun:
1037
      cmd.append("--test")
1038
    # we try multiple algorithms since the 'best' ones might not have
1039
    # space available in the right place, but later ones might (since
1040
    # they have less constraints); also note that only recent LVM
1041
    # supports 'cling'
1042
    for alloc_policy in "contiguous", "cling", "normal":
1043
      result = utils.RunCmd(cmd + ["--alloc", alloc_policy, self.dev_path])
1044
      if not result.failed:
1045
        return
1046
    _ThrowError("Can't grow LV %s: %s", self.dev_path, result.output)
1047

  
1048

  
1049
class DRBD8Status(object):
1050
  """A DRBD status representation class.
1051

  
1052
  Note that this doesn't support unconfigured devices (cs:Unconfigured).
1053

  
1054
  """
1055
  UNCONF_RE = re.compile(r"\s*[0-9]+:\s*cs:Unconfigured$")
1056
  LINE_RE = re.compile(r"\s*[0-9]+:\s*cs:(\S+)\s+(?:st|ro):([^/]+)/(\S+)"
1057
                       "\s+ds:([^/]+)/(\S+)\s+.*$")
1058
  SYNC_RE = re.compile(r"^.*\ssync'ed:\s*([0-9.]+)%.*"
1059
                       # Due to a bug in drbd in the kernel, introduced in
1060
                       # commit 4b0715f096 (still unfixed as of 2011-08-22)
1061
                       "(?:\s|M)"
1062
                       "finish: ([0-9]+):([0-9]+):([0-9]+)\s.*$")
1063

  
1064
  CS_UNCONFIGURED = "Unconfigured"
1065
  CS_STANDALONE = "StandAlone"
1066
  CS_WFCONNECTION = "WFConnection"
1067
  CS_WFREPORTPARAMS = "WFReportParams"
1068
  CS_CONNECTED = "Connected"
1069
  CS_STARTINGSYNCS = "StartingSyncS"
1070
  CS_STARTINGSYNCT = "StartingSyncT"
1071
  CS_WFBITMAPS = "WFBitMapS"
1072
  CS_WFBITMAPT = "WFBitMapT"
1073
  CS_WFSYNCUUID = "WFSyncUUID"
1074
  CS_SYNCSOURCE = "SyncSource"
1075
  CS_SYNCTARGET = "SyncTarget"
1076
  CS_PAUSEDSYNCS = "PausedSyncS"
1077
  CS_PAUSEDSYNCT = "PausedSyncT"
1078
  CSET_SYNC = compat.UniqueFrozenset([
1079
    CS_WFREPORTPARAMS,
1080
    CS_STARTINGSYNCS,
1081
    CS_STARTINGSYNCT,
1082
    CS_WFBITMAPS,
1083
    CS_WFBITMAPT,
1084
    CS_WFSYNCUUID,
1085
    CS_SYNCSOURCE,
1086
    CS_SYNCTARGET,
1087
    CS_PAUSEDSYNCS,
1088
    CS_PAUSEDSYNCT,
1089
    ])
1090

  
1091
  DS_DISKLESS = "Diskless"
1092
  DS_ATTACHING = "Attaching" # transient state
1093
  DS_FAILED = "Failed" # transient state, next: diskless
1094
  DS_NEGOTIATING = "Negotiating" # transient state
1095
  DS_INCONSISTENT = "Inconsistent" # while syncing or after creation
1096
  DS_OUTDATED = "Outdated"
1097
  DS_DUNKNOWN = "DUnknown" # shown for peer disk when not connected
1098
  DS_CONSISTENT = "Consistent"
1099
  DS_UPTODATE = "UpToDate" # normal state
1100

  
1101
  RO_PRIMARY = "Primary"
1102
  RO_SECONDARY = "Secondary"
1103
  RO_UNKNOWN = "Unknown"
1104

  
1105
  def __init__(self, procline):
1106
    u = self.UNCONF_RE.match(procline)
1107
    if u:
1108
      self.cstatus = self.CS_UNCONFIGURED
1109
      self.lrole = self.rrole = self.ldisk = self.rdisk = None
1110
    else:
1111
      m = self.LINE_RE.match(procline)
1112
      if not m:
1113
        raise errors.BlockDeviceError("Can't parse input data '%s'" % procline)
1114
      self.cstatus = m.group(1)
1115
      self.lrole = m.group(2)
1116
      self.rrole = m.group(3)
1117
      self.ldisk = m.group(4)
1118
      self.rdisk = m.group(5)
1119

  
1120
    # end reading of data from the LINE_RE or UNCONF_RE
1121

  
1122
    self.is_standalone = self.cstatus == self.CS_STANDALONE
1123
    self.is_wfconn = self.cstatus == self.CS_WFCONNECTION
1124
    self.is_connected = self.cstatus == self.CS_CONNECTED
1125
    self.is_primary = self.lrole == self.RO_PRIMARY
1126
    self.is_secondary = self.lrole == self.RO_SECONDARY
1127
    self.peer_primary = self.rrole == self.RO_PRIMARY
1128
    self.peer_secondary = self.rrole == self.RO_SECONDARY
1129
    self.both_primary = self.is_primary and self.peer_primary
1130
    self.both_secondary = self.is_secondary and self.peer_secondary
1131

  
1132
    self.is_diskless = self.ldisk == self.DS_DISKLESS
1133
    self.is_disk_uptodate = self.ldisk == self.DS_UPTODATE
1134

  
1135
    self.is_in_resync = self.cstatus in self.CSET_SYNC
1136
    self.is_in_use = self.cstatus != self.CS_UNCONFIGURED
1137

  
1138
    m = self.SYNC_RE.match(procline)
1139
    if m:
1140
      self.sync_percent = float(m.group(1))
1141
      hours = int(m.group(2))
1142
      minutes = int(m.group(3))
1143
      seconds = int(m.group(4))
1144
      self.est_time = hours * 3600 + minutes * 60 + seconds
1145
    else:
1146
      # we have (in this if branch) no percent information, but if
1147
      # we're resyncing we need to 'fake' a sync percent information,
1148
      # as this is how cmdlib determines if it makes sense to wait for
1149
      # resyncing or not
1150
      if self.is_in_resync:
1151
        self.sync_percent = 0
1152
      else:
1153
        self.sync_percent = None
1154
      self.est_time = None
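    # Illustrative sketch (not part of this revision): a resyncing entry in
    # /proc/drbd, once its continuation lines are joined, looks roughly like
    #   "0: cs:SyncTarget ro:Secondary/Primary ds:Inconsistent/UpToDate C
    #    ... sync'ed: 12.3% ... finish: 0:09:20 speed: ..."
    # and yields is_in_resync=True, sync_percent=12.3, est_time=560.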
1155

  
1156

  
1157
class BaseDRBD(BlockDev): # pylint: disable=W0223
1158
  """Base DRBD class.
1159

  
1160
  This class contains a few bits of common functionality between the
1161
  0.7 and 8.x versions of DRBD.
1162

  
1163
  """
1164
  _VERSION_RE = re.compile(r"^version: (\d+)\.(\d+)\.(\d+)(?:\.\d+)?"
1165
                           r" \(api:(\d+)/proto:(\d+)(?:-(\d+))?\)")
1166
  _VALID_LINE_RE = re.compile("^ *([0-9]+): cs:([^ ]+).*$")
1167
  _UNUSED_LINE_RE = re.compile("^ *([0-9]+): cs:Unconfigured$")
1168

  
1169
  _DRBD_MAJOR = 147
1170
  _ST_UNCONFIGURED = "Unconfigured"
1171
  _ST_WFCONNECTION = "WFConnection"
1172
  _ST_CONNECTED = "Connected"
1173

  
1174
  _STATUS_FILE = constants.DRBD_STATUS_FILE
1175
  _USERMODE_HELPER_FILE = "/sys/module/drbd/parameters/usermode_helper"
1176

  
1177
  @staticmethod
1178
  def _GetProcData(filename=_STATUS_FILE):
1179
    """Return data from /proc/drbd.
1180

  
1181
    """
1182
    try:
1183
      data = utils.ReadFile(filename).splitlines()
1184
    except EnvironmentError, err:
1185
      if err.errno == errno.ENOENT:
1186
        _ThrowError("The file %s cannot be opened, check if the module"
1187
                    " is loaded (%s)", filename, str(err))
1188
      else:
1189
        _ThrowError("Can't read the DRBD proc file %s: %s", filename, str(err))
1190
    if not data:
1191
      _ThrowError("Can't read any data from %s", filename)
1192
    return data
1193

  
1194
  @classmethod
1195
  def _MassageProcData(cls, data):
1196
    """Transform the output of _GetProdData into a nicer form.
1197

  
1198
    @return: a dictionary of minor: joined lines from /proc/drbd
1199
        for that minor
1200

  
1201
    """
1202
    results = {}
1203
    old_minor = old_line = None
1204
    for line in data:
1205
      if not line: # completely empty lines, as can be returned by drbd8.0+
1206
        continue
1207
      lresult = cls._VALID_LINE_RE.match(line)
1208
      if lresult is not None:
1209
        if old_minor is not None:
1210
          results[old_minor] = old_line
1211
        old_minor = int(lresult.group(1))
1212
        old_line = line
1213
      else:
1214
        if old_minor is not None:
1215
          old_line += " " + line.strip()
1216
    # add last line
1217
    if old_minor is not None:
1218
      results[old_minor] = old_line
1219
    return results
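    # Illustrative sketch (not part of this revision): for a minor whose
    # status is spread over a "0: cs:..." line plus indented continuation
    # lines, the result maps 0 to the single joined line, e.g.
    #   {0: " 0: cs:Connected ro:Primary/Secondary ds:UpToDate/UpToDate ..."}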
1220

  
1221
  @classmethod
1222
  def _GetVersion(cls, proc_data):
1223
    """Return the DRBD version.
1224

  
1225
    This will return a dict with keys:
1226
      - k_major
1227
      - k_minor
1228
      - k_point
1229
      - api
1230
      - proto
1231
      - proto2 (only on drbd > 8.2.X)
1232

  
1233
    """
1234
    first_line = proc_data[0].strip()
1235
    version = cls._VERSION_RE.match(first_line)
1236
    if not version:
1237
      raise errors.BlockDeviceError("Can't parse DRBD version from '%s'" %
1238
                                    first_line)
1239

  
1240
    values = version.groups()
1241
    retval = {
1242
      "k_major": int(values[0]),
1243
      "k_minor": int(values[1]),
1244
      "k_point": int(values[2]),
1245
      "api": int(values[3]),
1246
      "proto": int(values[4]),
1247
      }
1248
    if values[5] is not None:
1249
      retval["proto2"] = values[5]
1250

  
1251
    return retval
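    # Illustrative sketch (not part of this revision): a first /proc/drbd line
    # such as "version: 8.3.11 (api:88/proto:86-96)" yields
    #   {"k_major": 8, "k_minor": 3, "k_point": 11,
    #    "api": 88, "proto": 86, "proto2": "96"}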
1252

  
1253
  @staticmethod
1254
  def GetUsermodeHelper(filename=_USERMODE_HELPER_FILE):
1255
    """Returns DRBD usermode_helper currently set.
1256

  
1257
    """
1258
    try:
1259
      helper = utils.ReadFile(filename).splitlines()[0]
1260
    except EnvironmentError, err:
1261
      if err.errno == errno.ENOENT:
1262
        _ThrowError("The file %s cannot be opened, check if the module"
1263
                    " is loaded (%s)", filename, str(err))
1264
      else:
1265
        _ThrowError("Can't read DRBD helper file %s: %s", filename, str(err))
1266
    if not helper:
1267
      _ThrowError("Can't read any data from %s", filename)
1268
    return helper
1269

  
1270
  @staticmethod
1271
  def _DevPath(minor):
1272
    """Return the path to a drbd device for a given minor.
1273

  
1274
    """
1275
    return "/dev/drbd%d" % minor
1276

  
1277
  @classmethod
1278
  def GetUsedDevs(cls):
1279
    """Compute the list of used DRBD devices.
1280

  
1281
    """
1282
    data = cls._GetProcData()
1283

  
1284
    used_devs = {}
1285
    for line in data:
1286
      match = cls._VALID_LINE_RE.match(line)
1287
      if not match:
1288
        continue
1289
      minor = int(match.group(1))
1290
      state = match.group(2)
1291
      if state == cls._ST_UNCONFIGURED:
1292
        continue
1293
      used_devs[minor] = state, line
1294

  
1295
    return used_devs
1296

  
1297
  def _SetFromMinor(self, minor):
1298
    """Set our parameters based on the given minor.
1299

  
1300
    This sets our minor variable and our dev_path.
1301

  
1302
    """
1303
    if minor is None:
1304
      self.minor = self.dev_path = None
1305
      self.attached = False
1306
    else:
1307
      self.minor = minor
1308
      self.dev_path = self._DevPath(minor)
1309
      self.attached = True
1310

  
1311
  @staticmethod
1312
  def _CheckMetaSize(meta_device):
1313
    """Check if the given meta device looks like a valid one.
1314

  
1315
    This currently only checks the size, which must be around
1316
    128MiB.
1317

  
1318
    """
1319
    result = utils.RunCmd(["blockdev", "--getsize", meta_device])
1320
    if result.failed:
1321
      _ThrowError("Failed to get device size: %s - %s",
1322
                  result.fail_reason, result.output)
1323
    try:
1324
      sectors = int(result.stdout)
1325
    except (TypeError, ValueError):
1326
      _ThrowError("Invalid output from blockdev: '%s'", result.stdout)
1327
    num_bytes = sectors * 512
1328
    if num_bytes < 128 * 1024 * 1024: # less than 128MiB
1329
      _ThrowError("Meta device too small (%.2fMib)", (num_bytes / 1024 / 1024))
1330
    # the maximum *valid* size of the meta device when living on top
1331
    # of LVM is hard to compute: it depends on the number of stripes
1332
    # and the PE size; e.g. a 2-stripe, 64MB PE will result in a 128MB
1333
    # (normal size), but an eight-stripe 128MB PE will result in a 1GB
1334
    # size meta device; as such, we restrict it to 1GB (a little bit
1335
    # too generous, but making assumptions about PE size is hard)
1336
    if num_bytes > 1024 * 1024 * 1024:
1337
      _ThrowError("Meta device too big (%.2fMiB)", (num_bytes / 1024 / 1024))
1338

  
1339
  def Rename(self, new_id):
1340
    """Rename a device.
1341

  
1342
    This is not supported for drbd devices.
1343

  
1344
    """
1345
    raise errors.ProgrammerError("Can't rename a drbd device")
1346

  
1347

  
1348
class DRBD8(BaseDRBD):
1349
  """DRBD v8.x block device.
1350

  
1351
  This implements the local host part of the DRBD device, i.e. it
1352
  doesn't do anything to the supposed peer. If you need a fully
1353
  connected DRBD pair, you need to use this class on both hosts.
1354

  
1355
  The unique_id for the drbd device is a (local_ip, local_port,
1356
  remote_ip, remote_port, local_minor, secret) tuple, and it must have
1357
  two children: the data device and the meta_device. The meta device
1358
  is checked for valid size and is zeroed on create.
1359

  
1360
  """
1361
  _MAX_MINORS = 255
1362
  _PARSE_SHOW = None
1363

  
1364
  # timeout constants
1365
  _NET_RECONFIG_TIMEOUT = 60
1366

  
1367
  # command line options for barriers
1368
  _DISABLE_DISK_OPTION = "--no-disk-barrier"  # -a
1369
  _DISABLE_DRAIN_OPTION = "--no-disk-drain"   # -D
1370
  _DISABLE_FLUSH_OPTION = "--no-disk-flushes" # -i
1371
  _DISABLE_META_FLUSH_OPTION = "--no-md-flushes"  # -m
1372

  
1373
  def __init__(self, unique_id, children, size, params):
1374
    if children and children.count(None) > 0:
1375
      children = []
1376
    if len(children) not in (0, 2):
1377
      raise ValueError("Invalid configuration data %s" % str(children))
1378
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 6:
1379
      raise ValueError("Invalid configuration data %s" % str(unique_id))
1380
    (self._lhost, self._lport,
1381
     self._rhost, self._rport,
1382
     self._aminor, self._secret) = unique_id
1383
    if children:
1384
      if not _CanReadDevice(children[1].dev_path):
1385
        logging.info("drbd%s: Ignoring unreadable meta device", self._aminor)
1386
        children = []
1387
    super(DRBD8, self).__init__(unique_id, children, size, params)
1388
    self.major = self._DRBD_MAJOR
1389
    version = self._GetVersion(self._GetProcData())
1390
    if version["k_major"] != 8:
1391
      _ThrowError("Mismatch in DRBD kernel version and requested ganeti"
1392
                  " usage: kernel is %s.%s, ganeti wants 8.x",
1393
                  version["k_major"], version["k_minor"])
1394

  
1395
    if (self._lhost is not None and self._lhost == self._rhost and
1396
        self._lport == self._rport):
1397
      raise ValueError("Invalid configuration data, same local/remote %s" %
1398
                       (unique_id,))
1399
    self.Attach()
1400

  
1401
  @classmethod
1402
  def _InitMeta(cls, minor, dev_path):
1403
    """Initialize a meta device.
1404

  
1405
    This will not work if the given minor is in use.
1406

  
1407
    """
1408
    # Zero the metadata first, in order to make sure drbdmeta doesn't
1409
    # try to auto-detect existing filesystems or similar (see
1410
    # http://code.google.com/p/ganeti/issues/detail?id=182); we only
1411
    # care about the first 128MB of data in the device, even though it
1412
    # can be bigger
... This diff was truncated because it exceeds the maximum size that can be displayed.
