1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2010, 2011, 2012, 2013 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Block device abstraction"""
23

    
24
import re
25
import errno
26
import stat
27
import os
28
import logging
29
import math
30

    
31
from ganeti import utils
32
from ganeti import errors
33
from ganeti import constants
34
from ganeti import objects
35
from ganeti import compat
36
from ganeti import pathutils
37
from ganeti import serializer
38
from ganeti.storage import drbd
39
from ganeti.storage import base
40

    
41

    
42
class RbdShowmappedJsonError(Exception):
43
  """`rbd showmmapped' JSON formatting error Exception class.
44

45
  """
46
  pass
47

    
48

    
49
def _CheckResult(result):
50
  """Throws an error if the given result is a failed one.
51

52
  @param result: result from RunCmd
53

54
  """
55
  if result.failed:
56
    base.ThrowError("Command: %s error: %s - %s",
57
                    result.cmd, result.fail_reason, result.output)
58

    
59

    
60
def _GetForbiddenFileStoragePaths():
61
  """Builds a list of path prefixes which shouldn't be used for file storage.
62

63
  @rtype: frozenset
64

65
  """
66
  paths = set([
67
    "/boot",
68
    "/dev",
69
    "/etc",
70
    "/home",
71
    "/proc",
72
    "/root",
73
    "/sys",
74
    ])
75

    
76
  for prefix in ["", "/usr", "/usr/local"]:
77
    paths.update(map(lambda s: "%s/%s" % (prefix, s),
78
                     ["bin", "lib", "lib32", "lib64", "sbin"]))
79

    
80
  return compat.UniqueFrozenset(map(os.path.normpath, paths))
81

    
82

    
83
def _ComputeWrongFileStoragePaths(paths,
84
                                  _forbidden=_GetForbiddenFileStoragePaths()):
85
  """Cross-checks a list of paths for prefixes considered bad.
86

87
  Some paths, e.g. "/bin", should not be used for file storage.
88

89
  @type paths: list
90
  @param paths: List of paths to be checked
91
  @rtype: list
92
  @return: Sorted list of paths for which the user should be warned
93

94
  """
95
  def _Check(path):
96
    return (not os.path.isabs(path) or
97
            path in _forbidden or
98
            filter(lambda p: utils.IsBelowDir(p, path), _forbidden))
99

    
100
  return utils.NiceSort(filter(_Check, map(os.path.normpath, paths)))
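

# Illustrative sketch, not part of the module's public interface: shows the
# kind of result _ComputeWrongFileStoragePaths() produces. The sample paths
# below are hypothetical.
def _ExampleComputeWrongFileStoragePaths():
  # "/srv/ganeti/file-storage" is absolute and not below a forbidden prefix,
  # so only the relative path and the paths under "/bin" and "/etc" are
  # returned (sorted via utils.NiceSort).
  sample = ["/srv/ganeti/file-storage", "relative/dir",
            "/bin/my-storage", "/etc/ganeti/file-storage"]
  return _ComputeWrongFileStoragePaths(sample)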
101

    
102

    
103
def ComputeWrongFileStoragePaths(_filename=pathutils.FILE_STORAGE_PATHS_FILE):
104
  """Returns a list of file storage paths whose prefix is considered bad.
105

106
  See L{_ComputeWrongFileStoragePaths}.
107

108
  """
109
  return _ComputeWrongFileStoragePaths(_LoadAllowedFileStoragePaths(_filename))
110

    
111

    
112
def _CheckFileStoragePath(path, allowed):
113
  """Checks if a path is in a list of allowed paths for file storage.
114

115
  @type path: string
116
  @param path: Path to check
117
  @type allowed: list
118
  @param allowed: List of allowed paths
119
  @raise errors.FileStoragePathError: If the path is not allowed
120

121
  """
122
  if not os.path.isabs(path):
123
    raise errors.FileStoragePathError("File storage path must be absolute,"
124
                                      " got '%s'" % path)
125

    
126
  for i in allowed:
127
    if not os.path.isabs(i):
128
      logging.info("Ignoring relative path '%s' for file storage", i)
129
      continue
130

    
131
    if utils.IsBelowDir(i, path):
132
      break
133
  else:
134
    raise errors.FileStoragePathError("Path '%s' is not acceptable for file"
135
                                      " storage" % path)
136

    
137

    
138
def _LoadAllowedFileStoragePaths(filename):
139
  """Loads file containing allowed file storage paths.
140

141
  @rtype: list
142
  @return: List of allowed paths (can be an empty list)
143

144
  """
145
  try:
146
    contents = utils.ReadFile(filename)
147
  except EnvironmentError:
148
    return []
149
  else:
150
    return utils.FilterEmptyLinesAndComments(contents)
151

    
152

    
153
def CheckFileStoragePath(path, _filename=pathutils.FILE_STORAGE_PATHS_FILE):
154
  """Checks if a path is allowed for file storage.
155

156
  @type path: string
157
  @param path: Path to check
158
  @raise errors.FileStoragePathError: If the path is not allowed
159

160
  """
161
  allowed = _LoadAllowedFileStoragePaths(_filename)
162

    
163
  if _ComputeWrongFileStoragePaths([path]):
164
    raise errors.FileStoragePathError("Path '%s' uses a forbidden prefix" %
165
                                      path)
166

    
167
  _CheckFileStoragePath(path, allowed)
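

# Minimal usage sketch (illustrative only): CheckFileStoragePath() raises
# errors.FileStoragePathError for unacceptable paths, so callers typically
# wrap it in a try/except block. The path below is hypothetical.
def _ExampleCheckFileStoragePath():
  try:
    CheckFileStoragePath("/srv/ganeti/file-storage/inst1.disk0")
  except errors.FileStoragePathError, err:
    logging.warning("Path rejected for file storage: %s", err)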
168

    
169

    
170
class LogicalVolume(base.BlockDev):
171
  """Logical Volume block device.
172

173
  """
174
  _VALID_NAME_RE = re.compile("^[a-zA-Z0-9+_.-]*$")
175
  _PARSE_PV_DEV_RE = re.compile("^([^ ()]+)\([0-9]+\)$")
176
  _INVALID_NAMES = compat.UniqueFrozenset([".", "..", "snapshot", "pvmove"])
177
  _INVALID_SUBSTRINGS = compat.UniqueFrozenset(["_mlog", "_mimage"])
178

    
179
  def __init__(self, unique_id, children, size, params):
180
    """Attaches to a LV device.
181

182
    The unique_id is a tuple (vg_name, lv_name)
183

184
    """
185
    super(LogicalVolume, self).__init__(unique_id, children, size, params)
186
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
187
      raise ValueError("Invalid configuration data %s" % str(unique_id))
188
    self._vg_name, self._lv_name = unique_id
189
    self._ValidateName(self._vg_name)
190
    self._ValidateName(self._lv_name)
191
    self.dev_path = utils.PathJoin("/dev", self._vg_name, self._lv_name)
192
    self._degraded = True
193
    self.major = self.minor = self.pe_size = self.stripe_count = None
194
    self.pv_names = None
195
    self.Attach()
196

    
197
  @staticmethod
198
  def _GetStdPvSize(pvs_info):
199
    """Return the the standard PV size (used with exclusive storage).
200

201
    @param pvs_info: list of objects.LvmPvInfo, cannot be empty
202
    @rtype: float
203
    @return: size in MiB
204

205
    """
206
    assert len(pvs_info) > 0
207
    smallest = min([pv.size for pv in pvs_info])
208
    return smallest / (1 + constants.PART_MARGIN + constants.PART_RESERVED)
209

    
210
  @staticmethod
211
  def _ComputeNumPvs(size, pvs_info):
212
    """Compute the number of PVs needed for an LV (with exclusive storage).
213

214
    @type size: float
215
    @param size: LV size in MiB
216
    @param pvs_info: list of objects.LvmPvInfo, cannot be empty
217
    @rtype: integer
218
    @return: number of PVs needed
219
    """
220
    assert len(pvs_info) > 0
221
    pv_size = float(LogicalVolume._GetStdPvSize(pvs_info))
222
    return int(math.ceil(float(size) / pv_size))
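
  # Worked example for the two helpers above (comment-only sketch; the margin
  # values are assumptions): with constants.PART_MARGIN = 0.01 and
  # constants.PART_RESERVED = 0.02, a PV set whose smallest member is
  # 10300 MiB gives a standard PV size of 10300 / 1.03 = 10000 MiB, so a
  # 25000 MiB LV needs ceil(25000 / 10000) = 3 such PVs.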
223

    
224
  @staticmethod
225
  def _GetEmptyPvNames(pvs_info, max_pvs=None):
226
    """Return a list of empty PVs, by name.
227

228
    """
229
    empty_pvs = filter(objects.LvmPvInfo.IsEmpty, pvs_info)
230
    if max_pvs is not None:
231
      empty_pvs = empty_pvs[:max_pvs]
232
    return map((lambda pv: pv.name), empty_pvs)
233

    
234
  @classmethod
235
  def Create(cls, unique_id, children, size, spindles, params, excl_stor):
236
    """Create a new logical volume.
237

238
    """
239
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
240
      raise errors.ProgrammerError("Invalid configuration data %s" %
241
                                   str(unique_id))
242
    vg_name, lv_name = unique_id
243
    cls._ValidateName(vg_name)
244
    cls._ValidateName(lv_name)
245
    pvs_info = cls.GetPVInfo([vg_name])
246
    if not pvs_info:
247
      if excl_stor:
248
        msg = "No (empty) PVs found"
249
      else:
250
        msg = "Can't compute PV info for vg %s" % vg_name
251
      base.ThrowError(msg)
252
    pvs_info.sort(key=(lambda pv: pv.free), reverse=True)
253

    
254
    pvlist = [pv.name for pv in pvs_info]
255
    if compat.any(":" in v for v in pvlist):
256
      base.ThrowError("Some of your PVs have the invalid character ':' in their"
257
                      " name, this is not supported - please filter them out"
258
                      " in lvm.conf using either 'filter' or 'preferred_names'")
259

    
260
    current_pvs = len(pvlist)
261
    desired_stripes = params[constants.LDP_STRIPES]
262
    stripes = min(current_pvs, desired_stripes)
263

    
264
    if excl_stor:
265
      if spindles is None:
266
        base.ThrowError("Unspecified number of spindles: this is required"
267
                        "when exclusive storage is enabled, try running"
268
                        " gnt-cluster repair-disk-sizes")
269
      (err_msgs, _) = utils.LvmExclusiveCheckNodePvs(pvs_info)
270
      if err_msgs:
271
        for m in err_msgs:
272
          logging.warning(m)
273
      req_pvs = cls._ComputeNumPvs(size, pvs_info)
274
      if spindles < req_pvs:
275
        base.ThrowError("Requested number of spindles (%s) is not enough for"
276
                        " a disk of %d MB (at least %d spindles needed)",
277
                        spindles, size, req_pvs)
278
      else:
279
        req_pvs = spindles
280
      pvlist = cls._GetEmptyPvNames(pvs_info, req_pvs)
281
      current_pvs = len(pvlist)
282
      if current_pvs < req_pvs:
283
        base.ThrowError("Not enough empty PVs (spindles) to create a disk of %d"
284
                        " MB: %d available, %d needed",
285
                        size, current_pvs, req_pvs)
286
      assert current_pvs == len(pvlist)
287
      # We must update stripes to be sure to use all the desired spindles
288
      stripes = current_pvs
289
      if stripes > desired_stripes:
290
        # Don't warn when lowering stripes, as it's no surprise
291
        logging.warning("Using %s stripes instead of %s, to be able to use"
292
                        " %s spindles", stripes, desired_stripes, current_pvs)
293

    
294
    else:
295
      if stripes < desired_stripes:
296
        logging.warning("Could not use %d stripes for VG %s, as only %d PVs are"
297
                        " available.", desired_stripes, vg_name, current_pvs)
298
      free_size = sum([pv.free for pv in pvs_info])
299
      # The size constraint should have been checked from the master before
300
      # calling the create function.
301
      if free_size < size:
302
        base.ThrowError("Not enough free space: required %s,"
303
                        " available %s", size, free_size)
304

    
305
    # If the free space is not well distributed, we won't be able to
306
    # create an optimally-striped volume; in that case, we want to try
307
    # with N, N-1, ..., 2, and finally 1 (non-striped) number of
308
    # stripes
309
    cmd = ["lvcreate", "-L%dm" % size, "-n%s" % lv_name]
310
    for stripes_arg in range(stripes, 0, -1):
311
      result = utils.RunCmd(cmd + ["-i%d" % stripes_arg] + [vg_name] + pvlist)
312
      if not result.failed:
313
        break
314
    if result.failed:
315
      base.ThrowError("LV create failed (%s): %s",
316
                      result.fail_reason, result.output)
317
    return LogicalVolume(unique_id, children, size, params)
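
  # Illustration of the striping fallback above (hypothetical names and
  # sizes): for stripes == 3, a 1024 MiB LV "inst1.disk0" in VG "xenvg" is
  # attempted with the following commands, stopping at the first that works:
  #   lvcreate -L1024m -ninst1.disk0 -i3 xenvg /dev/sda1 /dev/sdb1 /dev/sdc1
  #   lvcreate -L1024m -ninst1.disk0 -i2 xenvg /dev/sda1 /dev/sdb1 /dev/sdc1
  #   lvcreate -L1024m -ninst1.disk0 -i1 xenvg /dev/sda1 /dev/sdb1 /dev/sdc1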
318

    
319
  @staticmethod
320
  def _GetVolumeInfo(lvm_cmd, fields):
321
    """Returns LVM Volume infos using lvm_cmd
322

323
    @param lvm_cmd: Should be one of "pvs", "vgs" or "lvs"
324
    @param fields: Fields to return
325
    @return: A list of dicts each with the parsed fields
326

327
    """
328
    if not fields:
329
      raise errors.ProgrammerError("No fields specified")
330

    
331
    sep = "|"
332
    cmd = [lvm_cmd, "--noheadings", "--nosuffix", "--units=m", "--unbuffered",
333
           "--separator=%s" % sep, "-o%s" % ",".join(fields)]
334

    
335
    result = utils.RunCmd(cmd)
336
    if result.failed:
337
      raise errors.CommandError("Can't get the volume information: %s - %s" %
338
                                (result.fail_reason, result.output))
339

    
340
    data = []
341
    for line in result.stdout.splitlines():
342
      splitted_fields = line.strip().split(sep)
343

    
344
      if len(fields) != len(splitted_fields):
345
        raise errors.CommandError("Can't parse %s output: line '%s'" %
346
                                  (lvm_cmd, line))
347

    
348
      data.append(splitted_fields)
349

    
350
    return data
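
  # For illustration, _GetVolumeInfo("vgs", ["vg_name", "vg_free"]) runs
  # roughly:
  #   vgs --noheadings --nosuffix --units=m --unbuffered --separator=| \
  #       -ovg_name,vg_free
  # and returns entries such as [["xenvg", "102400.00"], ...]; all values are
  # strings and callers convert them as needed (sample values made up).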
351

    
352
  @classmethod
353
  def GetPVInfo(cls, vg_names, filter_allocatable=True, include_lvs=False):
354
    """Get the free space info for PVs in a volume group.
355

356
    @param vg_names: list of volume group names, if empty all will be returned
357
    @param filter_allocatable: whether to skip over unallocatable PVs
358
    @param include_lvs: whether to include a list of LVs hosted on each PV
359

360
    @rtype: list
361
    @return: list of objects.LvmPvInfo objects
362

363
    """
364
    # We request "lv_name" field only if we care about LVs, so we don't get
365
    # a long list of entries with many duplicates unless we really have to.
366
    # The duplicate "pv_name" field will be ignored.
367
    if include_lvs:
368
      lvfield = "lv_name"
369
    else:
370
      lvfield = "pv_name"
371
    try:
372
      info = cls._GetVolumeInfo("pvs", ["pv_name", "vg_name", "pv_free",
373
                                        "pv_attr", "pv_size", lvfield])
374
    except errors.GenericError, err:
375
      logging.error("Can't get PV information: %s", err)
376
      return None
377

    
378
    # When asked for LVs, "pvs" may return multiple entries for the same PV-LV
379
    # pair. We sort entries by PV name and then LV name, so it's easy to weed
380
    # out duplicates.
381
    if include_lvs:
382
      info.sort(key=(lambda i: (i[0], i[5])))
383
    data = []
384
    lastpvi = None
385
    for (pv_name, vg_name, pv_free, pv_attr, pv_size, lv_name) in info:
386
      # (possibly) skip over pvs which are not allocatable
387
      if filter_allocatable and pv_attr[0] != "a":
388
        continue
389
      # (possibly) skip over pvs which are not in the right volume group(s)
390
      if vg_names and vg_name not in vg_names:
391
        continue
392
      # Beware of duplicates (check before inserting)
393
      if lastpvi and lastpvi.name == pv_name:
394
        if include_lvs and lv_name:
395
          if not lastpvi.lv_list or lastpvi.lv_list[-1] != lv_name:
396
            lastpvi.lv_list.append(lv_name)
397
      else:
398
        if include_lvs and lv_name:
399
          lvl = [lv_name]
400
        else:
401
          lvl = []
402
        lastpvi = objects.LvmPvInfo(name=pv_name, vg_name=vg_name,
403
                                    size=float(pv_size), free=float(pv_free),
404
                                    attributes=pv_attr, lv_list=lvl)
405
        data.append(lastpvi)
406

    
407
    return data
408

    
409
  @classmethod
410
  def _GetRawFreePvInfo(cls, vg_name):
411
    """Return info (size/free) about PVs.
412

413
    @type vg_name: string
414
    @param vg_name: VG name
415
    @rtype: tuple
416
    @return: (standard_pv_size_in_MiB, number_of_free_pvs, total_number_of_pvs)
417

418
    """
419
    pvs_info = cls.GetPVInfo([vg_name])
420
    if not pvs_info:
421
      pv_size = 0.0
422
      free_pvs = 0
423
      num_pvs = 0
424
    else:
425
      pv_size = cls._GetStdPvSize(pvs_info)
426
      free_pvs = len(cls._GetEmptyPvNames(pvs_info))
427
      num_pvs = len(pvs_info)
428
    return (pv_size, free_pvs, num_pvs)
429

    
430
  @classmethod
431
  def _GetExclusiveStorageVgFree(cls, vg_name):
432
    """Return the free disk space in the given VG, in exclusive storage mode.
433

434
    @type vg_name: string
435
    @param vg_name: VG name
436
    @rtype: float
437
    @return: free space in MiB
438
    """
439
    (pv_size, free_pvs, _) = cls._GetRawFreePvInfo(vg_name)
440
    return pv_size * free_pvs
441

    
442
  @classmethod
443
  def GetVgSpindlesInfo(cls, vg_name):
444
    """Get the free space info for specific VGs.
445

446
    @param vg_name: volume group name
447
    @rtype: tuple
448
    @return: (free_spindles, total_spindles)
449

450
    """
451
    (_, free_pvs, num_pvs) = cls._GetRawFreePvInfo(vg_name)
452
    return (free_pvs, num_pvs)
453

    
454
  @classmethod
455
  def GetVGInfo(cls, vg_names, excl_stor, filter_readonly=True):
456
    """Get the free space info for specific VGs.
457

458
    @param vg_names: list of volume group names, if empty all will be returned
459
    @param excl_stor: whether exclusive_storage is enabled
460
    @param filter_readonly: whether to skip over readonly VGs
461

462
    @rtype: list
463
    @return: list of tuples (free_space, total_size, name) with free_space in
464
             MiB
465

466
    """
467
    try:
468
      info = cls._GetVolumeInfo("vgs", ["vg_name", "vg_free", "vg_attr",
469
                                        "vg_size"])
470
    except errors.GenericError, err:
471
      logging.error("Can't get VG information: %s", err)
472
      return None
473

    
474
    data = []
475
    for vg_name, vg_free, vg_attr, vg_size in info:
476
      # (possibly) skip over vgs which are not writable
477
      if filter_readonly and vg_attr[0] == "r":
478
        continue
479
      # (possibly) skip over vgs which are not in the right volume group(s)
480
      if vg_names and vg_name not in vg_names:
481
        continue
482
      # Exclusive storage needs a different concept of free space
483
      if excl_stor:
484
        es_free = cls._GetExclusiveStorageVgFree(vg_name)
485
        assert es_free <= vg_free
486
        vg_free = es_free
487
      data.append((float(vg_free), float(vg_size), vg_name))
488

    
489
    return data
490

    
491
  @classmethod
492
  def _ValidateName(cls, name):
493
    """Validates that a given name is valid as VG or LV name.
494

495
    The list of valid characters and restricted names is taken out of
496
    the lvm(8) manpage, with the simplification that we enforce both
497
    VG and LV restrictions on the names.
498

499
    """
500
    if (not cls._VALID_NAME_RE.match(name) or
501
        name in cls._INVALID_NAMES or
502
        compat.any(substring in name for substring in cls._INVALID_SUBSTRINGS)):
503
      base.ThrowError("Invalid LVM name '%s'", name)
504

    
505
  def Remove(self):
506
    """Remove this logical volume.
507

508
    """
509
    if not self.minor and not self.Attach():
510
      # the LV does not exist
511
      return
512
    result = utils.RunCmd(["lvremove", "-f", "%s/%s" %
513
                           (self._vg_name, self._lv_name)])
514
    if result.failed:
515
      base.ThrowError("Can't lvremove: %s - %s",
516
                      result.fail_reason, result.output)
517

    
518
  def Rename(self, new_id):
519
    """Rename this logical volume.
520

521
    """
522
    if not isinstance(new_id, (tuple, list)) or len(new_id) != 2:
523
      raise errors.ProgrammerError("Invalid new logical id '%s'" % new_id)
524
    new_vg, new_name = new_id
525
    if new_vg != self._vg_name:
526
      raise errors.ProgrammerError("Can't move a logical volume across"
527
                                   " volume groups (from %s to to %s)" %
528
                                   (self._vg_name, new_vg))
529
    result = utils.RunCmd(["lvrename", new_vg, self._lv_name, new_name])
530
    if result.failed:
531
      base.ThrowError("Failed to rename the logical volume: %s", result.output)
532
    self._lv_name = new_name
533
    self.dev_path = utils.PathJoin("/dev", self._vg_name, self._lv_name)
534

    
535
  @classmethod
536
  def _ParseLvInfoLine(cls, line, sep):
537
    """Parse one line of the lvs output used in L{_GetLvInfo}.
538

539
    """
540
    elems = line.strip().rstrip(sep).split(sep)
541
    if len(elems) != 6:
542
      base.ThrowError("Can't parse LVS output, len(%s) != 6", str(elems))
543

    
544
    (status, major, minor, pe_size, stripes, pvs) = elems
545
    if len(status) < 6:
546
      base.ThrowError("lvs lv_attr is not at least 6 characters (%s)", status)
547

    
548
    try:
549
      major = int(major)
550
      minor = int(minor)
551
    except (TypeError, ValueError), err:
552
      base.ThrowError("lvs major/minor cannot be parsed: %s", str(err))
553

    
554
    try:
555
      pe_size = int(float(pe_size))
556
    except (TypeError, ValueError), err:
557
      base.ThrowError("Can't parse vg extent size: %s", err)
558

    
559
    try:
560
      stripes = int(stripes)
561
    except (TypeError, ValueError), err:
562
      base.ThrowError("Can't parse the number of stripes: %s", err)
563

    
564
    pv_names = []
565
    for pv in pvs.split(","):
566
      m = re.match(cls._PARSE_PV_DEV_RE, pv)
567
      if not m:
568
        base.ThrowError("Can't parse this device list: %s", pvs)
569
      pv_names.append(m.group(1))
570
    assert len(pv_names) > 0
571

    
572
    return (status, major, minor, pe_size, stripes, pv_names)
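
  # Illustrative input for the parser above (values are made up): a line such
  # as
  #   -wi-ao|252|3|4096.00|1|/dev/sda1(500)
  # yields status "-wi-ao", major 252, minor 3, pe_size 4096, stripes 1 and
  # pv_names ["/dev/sda1"].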
573

    
574
  @classmethod
575
  def _GetLvInfo(cls, dev_path, _run_cmd=utils.RunCmd):
576
    """Get info about the given existing LV to be used.
577

578
    """
579
    sep = "|"
580
    result = _run_cmd(["lvs", "--noheadings", "--separator=%s" % sep,
581
                       "--units=k", "--nosuffix",
582
                       "-olv_attr,lv_kernel_major,lv_kernel_minor,"
583
                       "vg_extent_size,stripes,devices", dev_path])
584
    if result.failed:
585
      base.ThrowError("Can't find LV %s: %s, %s",
586
                      dev_path, result.fail_reason, result.output)
587
    # the output can (and will) have multiple lines for multi-segment
588
    # LVs, as the 'stripes' parameter is a segment one, so we take
589
    # only the last entry, which is the one we're interested in; note
590
    # that with LVM2 anyway the 'stripes' value must be constant
591
    # across segments, so this is a no-op actually
592
    out = result.stdout.splitlines()
593
    if not out: # totally empty result? splitlines() returns at least
594
                # one line for any non-empty string
595
      base.ThrowError("Can't parse LVS output, no lines? Got '%s'", str(out))
596
    pv_names = set()
597
    for line in out:
598
      (status, major, minor, pe_size, stripes, more_pvs) = \
599
        cls._ParseLvInfoLine(line, sep)
600
      pv_names.update(more_pvs)
601
    return (status, major, minor, pe_size, stripes, pv_names)
602

    
603
  def Attach(self):
604
    """Attach to an existing LV.
605

606
    This method tries to find an existing, active LV that matches our
    name. If one is found, its major/minor numbers are recorded.
609

610
    """
611
    self.attached = False
612
    try:
613
      (status, major, minor, pe_size, stripes, pv_names) = \
614
        self._GetLvInfo(self.dev_path)
615
    except errors.BlockDeviceError:
616
      return False
617

    
618
    self.major = major
619
    self.minor = minor
620
    self.pe_size = pe_size
621
    self.stripe_count = stripes
622
    self._degraded = status[0] == "v" # virtual volume, i.e. it doesn't have
                                      # backing storage
624
    self.pv_names = pv_names
625
    self.attached = True
626
    return True
627

    
628
  def Assemble(self):
629
    """Assemble the device.
630

631
    We always run `lvchange -ay` on the LV to ensure it's active before
632
    use, as there were cases when xenvg was not active after boot
633
    (also possibly after disk issues).
634

635
    """
636
    result = utils.RunCmd(["lvchange", "-ay", self.dev_path])
637
    if result.failed:
638
      base.ThrowError("Can't activate lv %s: %s", self.dev_path, result.output)
639

    
640
  def Shutdown(self):
641
    """Shutdown the device.
642

643
    This is a no-op for the LV device type, as we don't deactivate the
644
    volumes on shutdown.
645

646
    """
647
    pass
648

    
649
  def GetSyncStatus(self):
650
    """Returns the sync status of the device.
651

652
    If this device is a mirroring device, this function returns the
653
    status of the mirror.
654

655
    For logical volumes, sync_percent and estimated_time are always
656
    None (no recovery in progress, as we don't handle the mirrored LV
657
    case). The is_degraded parameter is the inverse of the ldisk
658
    parameter.
659

660
    For the ldisk parameter, we check if the logical volume has the
661
    'virtual' type, which means it's not backed by existing storage
662
    anymore (reads from it return I/O errors). This happens after a
663
    physical disk failure and subsequent 'vgreduce --removemissing' on
664
    the volume group.
665

666
    The status was already read in Attach, so we just return it.
667

668
    @rtype: objects.BlockDevStatus
669

670
    """
671
    if self._degraded:
672
      ldisk_status = constants.LDS_FAULTY
673
    else:
674
      ldisk_status = constants.LDS_OKAY
675

    
676
    return objects.BlockDevStatus(dev_path=self.dev_path,
677
                                  major=self.major,
678
                                  minor=self.minor,
679
                                  sync_percent=None,
680
                                  estimated_time=None,
681
                                  is_degraded=self._degraded,
682
                                  ldisk_status=ldisk_status)
683

    
684
  def Open(self, force=False):
685
    """Make the device ready for I/O.
686

687
    This is a no-op for the LV device type.
688

689
    """
690
    pass
691

    
692
  def Close(self):
693
    """Notifies that the device will no longer be used for I/O.
694

695
    This is a no-op for the LV device type.
696

697
    """
698
    pass
699

    
700
  def Snapshot(self, size):
701
    """Create a snapshot copy of an lvm block device.
702

703
    @returns: tuple (vg, lv)
704

705
    """
706
    snap_name = self._lv_name + ".snap"
707

    
708
    # remove existing snapshot if found
709
    snap = LogicalVolume((self._vg_name, snap_name), None, size, self.params)
710
    base.IgnoreError(snap.Remove)
711

    
712
    vg_info = self.GetVGInfo([self._vg_name], False)
713
    if not vg_info:
714
      base.ThrowError("Can't compute VG info for vg %s", self._vg_name)
715
    free_size, _, _ = vg_info[0]
716
    if free_size < size:
717
      base.ThrowError("Not enough free space: required %s,"
718
                      " available %s", size, free_size)
719

    
720
    _CheckResult(utils.RunCmd(["lvcreate", "-L%dm" % size, "-s",
721
                               "-n%s" % snap_name, self.dev_path]))
722

    
723
    return (self._vg_name, snap_name)
724

    
725
  def _RemoveOldInfo(self):
726
    """Try to remove old tags from the lv.
727

728
    """
729
    result = utils.RunCmd(["lvs", "-o", "tags", "--noheadings", "--nosuffix",
730
                           self.dev_path])
731
    _CheckResult(result)
732

    
733
    raw_tags = result.stdout.strip()
734
    if raw_tags:
735
      for tag in raw_tags.split(","):
736
        _CheckResult(utils.RunCmd(["lvchange", "--deltag",
737
                                   tag.strip(), self.dev_path]))
738

    
739
  def SetInfo(self, text):
740
    """Update metadata with info text.
741

742
    """
743
    base.BlockDev.SetInfo(self, text)
744

    
745
    self._RemoveOldInfo()
746

    
747
    # Replace invalid characters
748
    text = re.sub("^[^A-Za-z0-9_+.]", "_", text)
749
    text = re.sub("[^-A-Za-z0-9_+.]", "_", text)
750

    
751
    # Only up to 128 characters are allowed
752
    text = text[:128]
753

    
754
    _CheckResult(utils.RunCmd(["lvchange", "--addtag", text, self.dev_path]))
755

    
756
  def _GetGrowthAvailabilityExclStor(self):
757
    """Return how much the disk can grow with exclusive storage.
758

759
    @rtype: float
760
    @return: available space in MiB
761

762
    """
763
    pvs_info = self.GetPVInfo([self._vg_name])
764
    if not pvs_info:
765
      base.ThrowError("Cannot get information about PVs for %s", self.dev_path)
766
    std_pv_size = self._GetStdPvSize(pvs_info)
767
    free_space = sum(pvi.free - (pvi.size - std_pv_size)
768
                        for pvi in pvs_info
769
                        if pvi.name in self.pv_names)
770
    return free_space
771

    
772
  def Grow(self, amount, dryrun, backingstore, excl_stor):
773
    """Grow the logical volume.
774

775
    """
776
    if not backingstore:
777
      return
778
    if self.pe_size is None or self.stripe_count is None:
779
      if not self.Attach():
780
        base.ThrowError("Can't attach to LV during Grow()")
781
    full_stripe_size = self.pe_size * self.stripe_count
782
    # pe_size is in KB
783
    amount *= 1024
784
    rest = amount % full_stripe_size
785
    if rest != 0:
786
      amount += full_stripe_size - rest
787
    cmd = ["lvextend", "-L", "+%dk" % amount]
788
    if dryrun:
789
      cmd.append("--test")
790
    if excl_stor:
791
      free_space = self._GetGrowthAvailabilityExclStor()
792
      # amount is in KiB, free_space in MiB
793
      if amount > free_space * 1024:
794
        base.ThrowError("Not enough free space to grow %s: %d MiB required,"
795
                        " %d available", self.dev_path, amount / 1024,
796
                        free_space)
797
      # Disk growth doesn't grow the number of spindles, so we must stay within
798
      # our assigned volumes
799
      pvlist = list(self.pv_names)
800
    else:
801
      pvlist = []
802
    # we try multiple algorithms since the 'best' ones might not have
803
    # space available in the right place, but later ones might (since
804
    # they have less constraints); also note that only recent LVM
805
    # supports 'cling'
806
    for alloc_policy in "contiguous", "cling", "normal":
807
      result = utils.RunCmd(cmd + ["--alloc", alloc_policy, self.dev_path] +
808
                            pvlist)
809
      if not result.failed:
810
        return
811
    base.ThrowError("Can't grow LV %s: %s", self.dev_path, result.output)
812

    
813
  def GetActualSpindles(self):
814
    """Return the number of spindles used.
815

816
    """
817
    assert self.attached, "BlockDevice not attached in GetActualSpindles()"
818
    return len(self.pv_names)
819

    
820

    
821
class FileStorage(base.BlockDev):
822
  """File device.
823

824
  This class represents a file storage backend device.
825

826
  The unique_id for the file device is a (file_driver, file_path) tuple.
827

828
  """
829
  def __init__(self, unique_id, children, size, params):
830
    """Initalizes a file device backend.
831

832
    """
833
    if children:
834
      raise errors.BlockDeviceError("Invalid setup for file device")
835
    super(FileStorage, self).__init__(unique_id, children, size, params)
836
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
837
      raise ValueError("Invalid configuration data %s" % str(unique_id))
838
    self.driver = unique_id[0]
839
    self.dev_path = unique_id[1]
840

    
841
    CheckFileStoragePath(self.dev_path)
842

    
843
    self.Attach()
844

    
845
  def Assemble(self):
846
    """Assemble the device.
847

848
    Checks whether the file device exists, raises BlockDeviceError otherwise.
849

850
    """
851
    if not os.path.exists(self.dev_path):
852
      base.ThrowError("File device '%s' does not exist" % self.dev_path)
853

    
854
  def Shutdown(self):
855
    """Shutdown the device.
856

857
    This is a no-op for the file type, as we don't deactivate
858
    the file on shutdown.
859

860
    """
861
    pass
862

    
863
  def Open(self, force=False):
864
    """Make the device ready for I/O.
865

866
    This is a no-op for the file type.
867

868
    """
869
    pass
870

    
871
  def Close(self):
872
    """Notifies that the device will no longer be used for I/O.
873

874
    This is a no-op for the file type.
875

876
    """
877
    pass
878

    
879
  def Remove(self):
880
    """Remove the file backing the block device.
881

882
    @rtype: boolean
883
    @return: True if the removal was successful
884

885
    """
886
    try:
887
      os.remove(self.dev_path)
888
    except OSError, err:
889
      if err.errno != errno.ENOENT:
890
        base.ThrowError("Can't remove file '%s': %s", self.dev_path, err)
891

    
892
  def Rename(self, new_id):
893
    """Renames the file.
894

895
    """
896
    # TODO: implement rename for file-based storage
897
    base.ThrowError("Rename is not supported for file-based storage")
898

    
899
  def Grow(self, amount, dryrun, backingstore, excl_stor):
900
    """Grow the file
901

902
    @param amount: the amount (in mebibytes) to grow with
903

904
    """
905
    if not backingstore:
906
      return
907
    # Check that the file exists
908
    self.Assemble()
909
    current_size = self.GetActualSize()
910
    new_size = current_size + amount * 1024 * 1024
911
    assert new_size > current_size, "Cannot Grow with a negative amount"
912
    # We can't really simulate the growth
913
    if dryrun:
914
      return
915
    try:
916
      f = open(self.dev_path, "a+")
917
      f.truncate(new_size)
918
      f.close()
919
    except EnvironmentError, err:
920
      base.ThrowError("Error in file growth: %", str(err))
921

    
922
  def Attach(self):
923
    """Attach to an existing file.
924

925
    Check if this file already exists.
926

927
    @rtype: boolean
928
    @return: True if file exists
929

930
    """
931
    self.attached = os.path.exists(self.dev_path)
932
    return self.attached
933

    
934
  def GetActualSize(self):
935
    """Return the actual disk size.
936

937
    @note: the device needs to be active when this is called
938

939
    """
940
    assert self.attached, "BlockDevice not attached in GetActualSize()"
941
    try:
942
      st = os.stat(self.dev_path)
943
      return st.st_size
944
    except OSError, err:
945
      base.ThrowError("Can't stat %s: %s", self.dev_path, err)
946

    
947
  @classmethod
948
  def Create(cls, unique_id, children, size, spindles, params, excl_stor):
949
    """Create a new file.
950

951
    @param size: the size of file in MiB
952

953
    @rtype: L{bdev.FileStorage}
954
    @return: an instance of FileStorage
955

956
    """
957
    if excl_stor:
958
      raise errors.ProgrammerError("FileStorage device requested with"
959
                                   " exclusive_storage")
960
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
961
      raise ValueError("Invalid configuration data %s" % str(unique_id))
962

    
963
    dev_path = unique_id[1]
964

    
965
    CheckFileStoragePath(dev_path)
966

    
967
    try:
968
      fd = os.open(dev_path, os.O_RDWR | os.O_CREAT | os.O_EXCL)
969
      f = os.fdopen(fd, "w")
970
      f.truncate(size * 1024 * 1024)
971
      f.close()
972
    except EnvironmentError, err:
973
      if err.errno == errno.EEXIST:
974
        base.ThrowError("File already existing: %s", dev_path)
975
      base.ThrowError("Error in file creation: %", str(err))
976

    
977
    return FileStorage(unique_id, children, size, params)
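

# Minimal usage sketch (illustrative, not upstream code): creating and growing
# a file-based disk with the class above. The path is hypothetical and must be
# below one of the allowed file storage directories; constants.FD_LOOP is
# assumed to name the standard "loop" file driver.
def _ExampleFileStorageUsage():
  unique_id = (constants.FD_LOOP, "/srv/ganeti/file-storage/inst1.disk0")
  dev = FileStorage.Create(unique_id, [], 1024, None, {}, False)
  dev.Grow(512, False, True, False)  # grow by 512 MiB
  return dev.GetActualSize()         # (1024 + 512) * 1024 * 1024 bytes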
978

    
979

    
980
class PersistentBlockDevice(base.BlockDev):
981
  """A block device with persistent node
982

983
  May be either directly attached, or exposed through DM (e.g. dm-multipath).
984
  udev helpers are probably required to give persistent, human-friendly
985
  names.
986

987
  For the time being, pathnames are required to lie under /dev.
988

989
  """
990
  def __init__(self, unique_id, children, size, params):
991
    """Attaches to a static block device.
992

993
    The unique_id is a path under /dev.
994

995
    """
996
    super(PersistentBlockDevice, self).__init__(unique_id, children, size,
997
                                                params)
998
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
999
      raise ValueError("Invalid configuration data %s" % str(unique_id))
1000
    self.dev_path = unique_id[1]
1001
    if not os.path.realpath(self.dev_path).startswith("/dev/"):
1002
      raise ValueError("Full path '%s' lies outside /dev" %
1003
                              os.path.realpath(self.dev_path))
1004
    # TODO: this is just a safety guard checking that we only deal with devices
1005
    # we know how to handle. In the future this will be integrated with
1006
    # external storage backends and possible values will probably be collected
1007
    # from the cluster configuration.
1008
    if unique_id[0] != constants.BLOCKDEV_DRIVER_MANUAL:
1009
      raise ValueError("Got persistent block device of invalid type: %s" %
1010
                       unique_id[0])
1011

    
1012
    self.major = self.minor = None
1013
    self.Attach()
1014

    
1015
  @classmethod
1016
  def Create(cls, unique_id, children, size, spindles, params, excl_stor):
1017
    """Create a new device
1018

1019
    This is a noop, we only return a PersistentBlockDevice instance
1020

1021
    """
1022
    if excl_stor:
1023
      raise errors.ProgrammerError("Persistent block device requested with"
1024
                                   " exclusive_storage")
1025
    return PersistentBlockDevice(unique_id, children, 0, params)
1026

    
1027
  def Remove(self):
1028
    """Remove a device
1029

1030
    This is a noop
1031

1032
    """
1033
    pass
1034

    
1035
  def Rename(self, new_id):
1036
    """Rename this device.
1037

1038
    """
1039
    base.ThrowError("Rename is not supported for PersistentBlockDev storage")
1040

    
1041
  def Attach(self):
1042
    """Attach to an existing block device.
1043

1044

1045
    """
1046
    self.attached = False
1047
    try:
1048
      st = os.stat(self.dev_path)
1049
    except OSError, err:
1050
      logging.error("Error stat()'ing %s: %s", self.dev_path, str(err))
1051
      return False
1052

    
1053
    if not stat.S_ISBLK(st.st_mode):
1054
      logging.error("%s is not a block device", self.dev_path)
1055
      return False
1056

    
1057
    self.major = os.major(st.st_rdev)
1058
    self.minor = os.minor(st.st_rdev)
1059
    self.attached = True
1060

    
1061
    return True
1062

    
1063
  def Assemble(self):
1064
    """Assemble the device.
1065

1066
    """
1067
    pass
1068

    
1069
  def Shutdown(self):
1070
    """Shutdown the device.
1071

1072
    """
1073
    pass
1074

    
1075
  def Open(self, force=False):
1076
    """Make the device ready for I/O.
1077

1078
    """
1079
    pass
1080

    
1081
  def Close(self):
1082
    """Notifies that the device will no longer be used for I/O.
1083

1084
    """
1085
    pass
1086

    
1087
  def Grow(self, amount, dryrun, backingstore, excl_stor):
1088
    """Grow the logical volume.
1089

1090
    """
1091
    base.ThrowError("Grow is not supported for PersistentBlockDev storage")
1092

    
1093

    
1094
class RADOSBlockDevice(base.BlockDev):
1095
  """A RADOS Block Device (rbd).
1096

1097
  This class implements the RADOS Block Device for the backend. You need
1098
  the rbd kernel driver, the RADOS Tools and a working RADOS cluster for
1099
  this to be functional.
1100

1101
  """
1102
  def __init__(self, unique_id, children, size, params):
1103
    """Attaches to an rbd device.
1104

1105
    """
1106
    super(RADOSBlockDevice, self).__init__(unique_id, children, size, params)
1107
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
1108
      raise ValueError("Invalid configuration data %s" % str(unique_id))
1109

    
1110
    self.driver, self.rbd_name = unique_id
1111

    
1112
    self.major = self.minor = None
1113
    self.Attach()
1114

    
1115
  @classmethod
1116
  def Create(cls, unique_id, children, size, spindles, params, excl_stor):
1117
    """Create a new rbd device.
1118

1119
    Provision a new rbd volume inside a RADOS pool.
1120

1121
    """
1122
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
1123
      raise errors.ProgrammerError("Invalid configuration data %s" %
1124
                                   str(unique_id))
1125
    if excl_stor:
1126
      raise errors.ProgrammerError("RBD device requested with"
1127
                                   " exclusive_storage")
1128
    rbd_pool = params[constants.LDP_POOL]
1129
    rbd_name = unique_id[1]
1130

    
1131
    # Provision a new rbd volume (Image) inside the RADOS cluster.
1132
    cmd = [constants.RBD_CMD, "create", "-p", rbd_pool,
1133
           rbd_name, "--size", "%s" % size]
1134
    result = utils.RunCmd(cmd)
1135
    if result.failed:
1136
      base.ThrowError("rbd creation failed (%s): %s",
1137
                      result.fail_reason, result.output)
1138

    
1139
    return RADOSBlockDevice(unique_id, children, size, params)
1140

    
1141
  def Remove(self):
1142
    """Remove the rbd device.
1143

1144
    """
1145
    rbd_pool = self.params[constants.LDP_POOL]
1146
    rbd_name = self.unique_id[1]
1147

    
1148
    if not self.minor and not self.Attach():
1149
      # The rbd device doesn't exist.
1150
      return
1151

    
1152
    # First shutdown the device (remove mappings).
1153
    self.Shutdown()
1154

    
1155
    # Remove the actual Volume (Image) from the RADOS cluster.
1156
    cmd = [constants.RBD_CMD, "rm", "-p", rbd_pool, rbd_name]
1157
    result = utils.RunCmd(cmd)
1158
    if result.failed:
1159
      base.ThrowError("Can't remove Volume from cluster with rbd rm: %s - %s",
1160
                      result.fail_reason, result.output)
1161

    
1162
  def Rename(self, new_id):
1163
    """Rename this device.
1164

1165
    """
1166
    pass
1167

    
1168
  def Attach(self):
1169
    """Attach to an existing rbd device.
1170

1171
    This method maps the rbd volume that matches our name with
1172
    an rbd device and then attaches to this device.
1173

1174
    """
1175
    self.attached = False
1176

    
1177
    # Map the rbd volume to a block device under /dev
1178
    self.dev_path = self._MapVolumeToBlockdev(self.unique_id)
1179

    
1180
    try:
1181
      st = os.stat(self.dev_path)
1182
    except OSError, err:
1183
      logging.error("Error stat()'ing %s: %s", self.dev_path, str(err))
1184
      return False
1185

    
1186
    if not stat.S_ISBLK(st.st_mode):
1187
      logging.error("%s is not a block device", self.dev_path)
1188
      return False
1189

    
1190
    self.major = os.major(st.st_rdev)
1191
    self.minor = os.minor(st.st_rdev)
1192
    self.attached = True
1193

    
1194
    return True
1195

    
1196
  def _MapVolumeToBlockdev(self, unique_id):
1197
    """Maps existing rbd volumes to block devices.
1198

1199
    This method should be idempotent if the mapping already exists.
1200

1201
    @rtype: string
1202
    @return: the block device path that corresponds to the volume
1203

1204
    """
1205
    pool = self.params[constants.LDP_POOL]
1206
    name = unique_id[1]
1207

    
1208
    # Check if the mapping already exists.
1209
    rbd_dev = self._VolumeToBlockdev(pool, name)
1210
    if rbd_dev:
1211
      # The mapping exists. Return it.
1212
      return rbd_dev
1213

    
1214
    # The mapping doesn't exist. Create it.
1215
    map_cmd = [constants.RBD_CMD, "map", "-p", pool, name]
1216
    result = utils.RunCmd(map_cmd)
1217
    if result.failed:
1218
      base.ThrowError("rbd map failed (%s): %s",
1219
                      result.fail_reason, result.output)
1220

    
1221
    # Find the corresponding rbd device.
1222
    rbd_dev = self._VolumeToBlockdev(pool, name)
1223
    if not rbd_dev:
1224
      base.ThrowError("rbd map succeeded, but could not find the rbd block"
1225
                      " device in output of showmapped, for volume: %s", name)
1226

    
1227
    # The device was successfully mapped. Return it.
1228
    return rbd_dev
1229

    
1230
  @classmethod
1231
  def _VolumeToBlockdev(cls, pool, volume_name):
1232
    """Do the 'volume name'-to-'rbd block device' resolving.
1233

1234
    @type pool: string
1235
    @param pool: RADOS pool to use
1236
    @type volume_name: string
1237
    @param volume_name: the name of the volume whose device we search for
1238
    @rtype: string or None
1239
    @return: block device path if the volume is mapped, else None
1240

1241
    """
1242
    try:
1243
      # Newer versions of the rbd tool support json output formatting. Use it
1244
      # if available.
1245
      showmap_cmd = [
1246
        constants.RBD_CMD,
1247
        "showmapped",
1248
        "-p",
1249
        pool,
1250
        "--format",
1251
        "json"
1252
        ]
1253
      result = utils.RunCmd(showmap_cmd)
1254
      if result.failed:
1255
        logging.error("rbd JSON output formatting returned error (%s): %s,"
1256
                      "falling back to plain output parsing",
1257
                      result.fail_reason, result.output)
1258
        raise RbdShowmappedJsonError
1259

    
1260
      return cls._ParseRbdShowmappedJson(result.output, volume_name)
1261
    except RbdShowmappedJsonError:
1262
      # For older versions of rbd, we have to parse the plain / text output
1263
      # manually.
1264
      showmap_cmd = [constants.RBD_CMD, "showmapped", "-p", pool]
1265
      result = utils.RunCmd(showmap_cmd)
1266
      if result.failed:
1267
        base.ThrowError("rbd showmapped failed (%s): %s",
1268
                        result.fail_reason, result.output)
1269

    
1270
      return cls._ParseRbdShowmappedPlain(result.output, volume_name)
1271

    
1272
  @staticmethod
1273
  def _ParseRbdShowmappedJson(output, volume_name):
1274
    """Parse the json output of `rbd showmapped'.
1275

1276
    This method parses the json output of `rbd showmapped' and returns the rbd
1277
    block device path (e.g. /dev/rbd0) that matches the given rbd volume.
1278

1279
    @type output: string
1280
    @param output: the json output of `rbd showmapped'
1281
    @type volume_name: string
1282
    @param volume_name: the name of the volume whose device we search for
1283
    @rtype: string or None
1284
    @return: block device path if the volume is mapped, else None
1285

1286
    """
1287
    try:
1288
      devices = serializer.LoadJson(output)
1289
    except ValueError, err:
1290
      base.ThrowError("Unable to parse JSON data: %s" % err)
1291

    
1292
    rbd_dev = None
1293
    for d in devices.values(): # pylint: disable=E1103
1294
      try:
1295
        name = d["name"]
1296
      except KeyError:
1297
        base.ThrowError("'name' key missing from json object %s", devices)
1298

    
1299
      if name == volume_name:
1300
        if rbd_dev is not None:
1301
          base.ThrowError("rbd volume %s is mapped more than once", volume_name)
1302

    
1303
        rbd_dev = d["device"]
1304

    
1305
    return rbd_dev
1306

    
1307
  @staticmethod
1308
  def _ParseRbdShowmappedPlain(output, volume_name):
1309
    """Parse the (plain / text) output of `rbd showmapped'.
1310

1311
    This method parses the output of `rbd showmapped' and returns
1312
    the rbd block device path (e.g. /dev/rbd0) that matches the
1313
    given rbd volume.
1314

1315
    @type output: string
1316
    @param output: the plain text output of `rbd showmapped'
1317
    @type volume_name: string
1318
    @param volume_name: the name of the volume whose device we search for
1319
    @rtype: string or None
1320
    @return: block device path if the volume is mapped, else None
1321

1322
    """
1323
    allfields = 5
1324
    volumefield = 2
1325
    devicefield = 4
1326

    
1327
    lines = output.splitlines()
1328

    
1329
    # Try parsing the new output format (ceph >= 0.55).
1330
    splitted_lines = map(lambda l: l.split(), lines)
1331

    
1332
    # Check for empty output.
1333
    if not splitted_lines:
1334
      return None
1335

    
1336
    # Check showmapped output, to determine number of fields.
1337
    field_cnt = len(splitted_lines[0])
1338
    if field_cnt != allfields:
1339
      # Parsing the new format failed. Fallback to parsing the old output
1340
      # format (< 0.55).
1341
      splitted_lines = map(lambda l: l.split("\t"), lines)
      field_cnt = len(splitted_lines[0])
      if field_cnt != allfields:
1343
        base.ThrowError("Cannot parse rbd showmapped output expected %s fields,"
1344
                        " found %s", allfields, field_cnt)
1345

    
1346
    matched_lines = \
1347
      filter(lambda l: len(l) == allfields and l[volumefield] == volume_name,
1348
             splitted_lines)
1349

    
1350
    if len(matched_lines) > 1:
1351
      base.ThrowError("rbd volume %s mapped more than once", volume_name)
1352

    
1353
    if matched_lines:
1354
      # rbd block device found. Return it.
1355
      rbd_dev = matched_lines[0][devicefield]
1356
      return rbd_dev
1357

    
1358
    # The given volume is not mapped.
1359
    return None
1360

    
1361
  def Assemble(self):
1362
    """Assemble the device.
1363

1364
    """
1365
    pass
1366

    
1367
  def Shutdown(self):
1368
    """Shutdown the device.
1369

1370
    """
1371
    if not self.minor and not self.Attach():
1372
      # The rbd device doesn't exist.
1373
      return
1374

    
1375
    # Unmap the block device from the Volume.
1376
    self._UnmapVolumeFromBlockdev(self.unique_id)
1377

    
1378
    self.minor = None
1379
    self.dev_path = None
1380

    
1381
  def _UnmapVolumeFromBlockdev(self, unique_id):
1382
    """Unmaps the rbd device from the Volume it is mapped.
1383

1384
    Unmaps the rbd device from the Volume it was previously mapped to.
1385
    This method should be idempotent if the Volume isn't mapped.
1386

1387
    """
1388
    pool = self.params[constants.LDP_POOL]
1389
    name = unique_id[1]
1390

    
1391
    # Check if the mapping already exists.
1392
    rbd_dev = self._VolumeToBlockdev(pool, name)
1393

    
1394
    if rbd_dev:
1395
      # The mapping exists. Unmap the rbd device.
1396
      unmap_cmd = [constants.RBD_CMD, "unmap", "%s" % rbd_dev]
1397
      result = utils.RunCmd(unmap_cmd)
1398
      if result.failed:
1399
        base.ThrowError("rbd unmap failed (%s): %s",
1400
                        result.fail_reason, result.output)
1401

    
1402
  def Open(self, force=False):
1403
    """Make the device ready for I/O.
1404

1405
    """
1406
    pass
1407

    
1408
  def Close(self):
1409
    """Notifies that the device will no longer be used for I/O.
1410

1411
    """
1412
    pass
1413

    
1414
  def Grow(self, amount, dryrun, backingstore, excl_stor):
1415
    """Grow the Volume.
1416

1417
    @type amount: integer
1418
    @param amount: the amount (in mebibytes) to grow with
1419
    @type dryrun: boolean
1420
    @param dryrun: whether to execute the operation in simulation mode
1421
        only, without actually increasing the size
1422

1423
    """
1424
    if not backingstore:
1425
      return
1426
    if not self.Attach():
1427
      base.ThrowError("Can't attach to rbd device during Grow()")
1428

    
1429
    if dryrun:
1430
      # the rbd tool does not support dry runs of resize operations.
1431
      # Since rbd volumes are thinly provisioned, we assume
1432
      # there is always enough free space for the operation.
1433
      return
1434

    
1435
    rbd_pool = self.params[constants.LDP_POOL]
1436
    rbd_name = self.unique_id[1]
1437
    new_size = self.size + amount
1438

    
1439
    # Resize the rbd volume (Image) inside the RADOS cluster.
1440
    cmd = [constants.RBD_CMD, "resize", "-p", rbd_pool,
1441
           rbd_name, "--size", "%s" % new_size]
1442
    result = utils.RunCmd(cmd)
1443
    if result.failed:
1444
      base.ThrowError("rbd resize failed (%s): %s",
1445
                      result.fail_reason, result.output)
1446

    
1447

    
1448
class ExtStorageDevice(base.BlockDev):
1449
  """A block device provided by an ExtStorage Provider.
1450

1451
  This class implements the External Storage Interface, which means
1452
  handling of the externally provided block devices.
1453

1454
  """
1455
  def __init__(self, unique_id, children, size, params):
1456
    """Attaches to an extstorage block device.
1457

1458
    """
1459
    super(ExtStorageDevice, self).__init__(unique_id, children, size, params)
1460
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
1461
      raise ValueError("Invalid configuration data %s" % str(unique_id))
1462

    
1463
    self.driver, self.vol_name = unique_id
1464
    self.ext_params = params
1465

    
1466
    self.major = self.minor = None
1467
    self.Attach()
1468

    
1469
  @classmethod
1470
  def Create(cls, unique_id, children, size, spindles, params, excl_stor):
1471
    """Create a new extstorage device.
1472

1473
    Provision a new volume using an extstorage provider, which will
1474
    then be mapped to a block device.
1475

1476
    """
1477
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
1478
      raise errors.ProgrammerError("Invalid configuration data %s" %
1479
                                   str(unique_id))
1480
    if excl_stor:
1481
      raise errors.ProgrammerError("extstorage device requested with"
1482
                                   " exclusive_storage")
1483

    
1484
    # Call the External Storage's create script,
1485
    # to provision a new Volume inside the External Storage
1486
    _ExtStorageAction(constants.ES_ACTION_CREATE, unique_id,
1487
                      params, str(size))
1488

    
1489
    return ExtStorageDevice(unique_id, children, size, params)
1490

    
1491
  def Remove(self):
1492
    """Remove the extstorage device.
1493

1494
    """
1495
    if not self.minor and not self.Attach():
1496
      # The extstorage device doesn't exist.
1497
      return
1498

    
1499
    # First shutdown the device (remove mappings).
1500
    self.Shutdown()
1501

    
1502
    # Call the External Storage's remove script,
1503
    # to remove the Volume from the External Storage
1504
    _ExtStorageAction(constants.ES_ACTION_REMOVE, self.unique_id,
1505
                      self.ext_params)
1506

    
1507
  def Rename(self, new_id):
1508
    """Rename this device.
1509

1510
    """
1511
    pass
1512

    
1513
  def Attach(self):
1514
    """Attach to an existing extstorage device.
1515

1516
    This method maps the extstorage volume that matches our name with
1517
    a corresponding block device and then attaches to this device.
1518

1519
    """
1520
    self.attached = False
1521

    
1522
    # Call the External Storage's attach script,
1523
    # to attach an existing Volume to a block device under /dev
1524
    self.dev_path = _ExtStorageAction(constants.ES_ACTION_ATTACH,
1525
                                      self.unique_id, self.ext_params)
1526

    
1527
    try:
1528
      st = os.stat(self.dev_path)
1529
    except OSError, err:
1530
      logging.error("Error stat()'ing %s: %s", self.dev_path, str(err))
1531
      return False
1532

    
1533
    if not stat.S_ISBLK(st.st_mode):
1534
      logging.error("%s is not a block device", self.dev_path)
1535
      return False
1536

    
1537
    self.major = os.major(st.st_rdev)
1538
    self.minor = os.minor(st.st_rdev)
1539
    self.attached = True
1540

    
1541
    return True
1542

    
1543
  def Assemble(self):
1544
    """Assemble the device.
1545

1546
    """
1547
    pass
1548

    
1549
  def Shutdown(self):
1550
    """Shutdown the device.
1551

1552
    """
1553
    if not self.minor and not self.Attach():
1554
      # The extstorage device doesn't exist.
1555
      return
1556

    
1557
    # Call the External Storage's detach script,
1558
    # to detach an existing Volume from its block device under /dev
1559
    _ExtStorageAction(constants.ES_ACTION_DETACH, self.unique_id,
1560
                      self.ext_params)
1561

    
1562
    self.minor = None
1563
    self.dev_path = None
1564

    
1565
  def Open(self, force=False):
    """Make the device ready for I/O.

    """
    pass

  def Close(self):
    """Notifies that the device will no longer be used for I/O.

    """
    pass

  def Grow(self, amount, dryrun, backingstore, excl_stor):
    """Grow the Volume.

    @type amount: integer
    @param amount: the amount (in mebibytes) to grow with
    @type dryrun: boolean
    @param dryrun: whether to execute the operation in simulation mode
        only, without actually increasing the size

    """
    if not backingstore:
      return
    if not self.Attach():
      base.ThrowError("Can't attach to extstorage device during Grow()")

    if dryrun:
      # we do not support dry runs of resize operations for now.
      return

    new_size = self.size + amount

    # Call the External Storage's grow script,
    # to grow an existing Volume inside the External Storage
    _ExtStorageAction(constants.ES_ACTION_GROW, self.unique_id,
                      self.ext_params, str(self.size), grow=str(new_size))

  def SetInfo(self, text):
    """Update metadata with info text.

    """
    # Replace invalid characters
    text = re.sub("^[^A-Za-z0-9_+.]", "_", text)
    text = re.sub("[^-A-Za-z0-9_+.]", "_", text)

    # Only up to 128 characters are allowed
    text = text[:128]

    # Call the External Storage's setinfo script,
    # to set metadata for an existing Volume inside the External Storage
    _ExtStorageAction(constants.ES_ACTION_SETINFO, self.unique_id,
                      self.ext_params, metadata=text)

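  # Worked example of the sanitization above (input values are made up):
  # "instance1.example.com" passes through unchanged, while any character
  # outside [-A-Za-z0-9_+.] is replaced, so "foo:bar baz" becomes
  # "foo_bar_baz"; the result is also truncated to 128 characters before it
  # is passed to the provider's setinfo script.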


def _ExtStorageAction(action, unique_id, ext_params,
                      size=None, grow=None, metadata=None):
  """Take an External Storage action.

  Take an External Storage action concerning or affecting
  a specific Volume inside the External Storage.

  @type action: string
  @param action: which action to perform. One of:
                 create / remove / grow / attach / detach / setinfo
  @type unique_id: tuple (driver, vol_name)
  @param unique_id: a tuple containing the type of ExtStorage (driver)
                    and the Volume name
  @type ext_params: dict
  @param ext_params: ExtStorage parameters
  @type size: integer
  @param size: the size of the Volume in mebibytes
  @type grow: integer
  @param grow: the new size in mebibytes (after grow)
  @type metadata: string
  @param metadata: metadata info of the Volume, for use by the provider
  @rtype: None or a block device path (during attach)

  """
  driver, vol_name = unique_id

  # Create an External Storage instance of type `driver'
  status, inst_es = ExtStorageFromDisk(driver)
  if not status:
    base.ThrowError("%s" % inst_es)

  # Create the basic environment for the driver's scripts
  create_env = _ExtStorageEnvironment(unique_id, ext_params, size,
                                      grow, metadata)

  # Do not use log file for action `attach' as we need
  # to get the output from RunResult
  # TODO: find a way to have a log file for attach too
  logfile = None
  if action is not constants.ES_ACTION_ATTACH:
    logfile = _VolumeLogName(action, driver, vol_name)

  # Make sure the given action results in a valid script
  if action not in constants.ES_SCRIPTS:
    base.ThrowError("Action '%s' doesn't result in a valid ExtStorage script" %
                    action)

  # Find out which external script to run according to the given action
  script_name = action + "_script"
  script = getattr(inst_es, script_name)

  # Run the external script
  result = utils.RunCmd([script], env=create_env,
                        cwd=inst_es.path, output=logfile,)
  if result.failed:
    logging.error("External storage's %s command '%s' returned"
                  " error: %s, logfile: %s, output: %s",
                  action, result.cmd, result.fail_reason,
                  logfile, result.output)

    # If logfile is 'None' (during attach), it breaks TailFile
    # TODO: have a log file for attach too
    if action is not constants.ES_ACTION_ATTACH:
      lines = [utils.SafeEncode(val)
               for val in utils.TailFile(logfile, lines=20)]
    else:
      lines = result.output[-20:]

    base.ThrowError("External storage's %s script failed (%s), last"
                    " lines of output:\n%s",
                    action, result.fail_reason, "\n".join(lines))

  if action == constants.ES_ACTION_ATTACH:
    return result.stdout


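# Rough sketch of the resulting invocation for a grow request (all values
# are made up; the grow script is the provider file registered as
# grow_script by ExtStorageFromDisk()):
#
#   cd <provider dir> && \
#     VOL_NAME=vol-0001 VOL_SIZE=1024 VOL_NEW_SIZE=2048 <grow script>
#
# plus one EXTP_<NAME> variable per ExtStorage parameter.  Output is
# captured in the log file computed by _VolumeLogName() for every action
# except attach, whose stdout is returned as the mapped device path.

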
def ExtStorageFromDisk(name, base_dir=None):
  """Create an ExtStorage instance from disk.

  This function will return an ExtStorage instance
  if the given name is a valid ExtStorage name.

  @type base_dir: string
  @keyword base_dir: Base directory containing ExtStorage installations.
                     Defaults to a search in all the ES_SEARCH_PATH dirs.
  @rtype: tuple
  @return: True and the ExtStorage instance if we find a valid one, or
      False and the diagnostic message on error

  """
  if base_dir is None:
    es_base_dir = pathutils.ES_SEARCH_PATH
  else:
    es_base_dir = [base_dir]

  es_dir = utils.FindFile(name, es_base_dir, os.path.isdir)

  if es_dir is None:
    return False, ("Directory for External Storage Provider %s not"
                   " found in search path" % name)

  # ES files dictionary; we will populate it with the absolute path
  # names. If the value is True, then it is a required file, otherwise
  # an optional one.
  es_files = dict.fromkeys(constants.ES_SCRIPTS, True)

  es_files[constants.ES_PARAMETERS_FILE] = True

  for (filename, _) in es_files.items():
    es_files[filename] = utils.PathJoin(es_dir, filename)

    try:
      st = os.stat(es_files[filename])
    except EnvironmentError, err:
      return False, ("File '%s' under path '%s' is missing (%s)" %
                     (filename, es_dir, utils.ErrnoOrStr(err)))

    if not stat.S_ISREG(stat.S_IFMT(st.st_mode)):
      return False, ("File '%s' under path '%s' is not a regular file" %
                     (filename, es_dir))

    if filename in constants.ES_SCRIPTS:
      if stat.S_IMODE(st.st_mode) & stat.S_IXUSR != stat.S_IXUSR:
        return False, ("File '%s' under path '%s' is not executable" %
                       (filename, es_dir))

  parameters = []
  if constants.ES_PARAMETERS_FILE in es_files:
    parameters_file = es_files[constants.ES_PARAMETERS_FILE]
    try:
      parameters = utils.ReadFile(parameters_file).splitlines()
    except EnvironmentError, err:
      return False, ("Error while reading the EXT parameters file at %s: %s" %
                     (parameters_file, utils.ErrnoOrStr(err)))
    parameters = [v.split(None, 1) for v in parameters]

  es_obj = \
    objects.ExtStorage(name=name, path=es_dir,
                       create_script=es_files[constants.ES_SCRIPT_CREATE],
                       remove_script=es_files[constants.ES_SCRIPT_REMOVE],
                       grow_script=es_files[constants.ES_SCRIPT_GROW],
                       attach_script=es_files[constants.ES_SCRIPT_ATTACH],
                       detach_script=es_files[constants.ES_SCRIPT_DETACH],
                       setinfo_script=es_files[constants.ES_SCRIPT_SETINFO],
                       verify_script=es_files[constants.ES_SCRIPT_VERIFY],
                       supported_parameters=parameters)
  return True, es_obj


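# Illustrative provider layout checked by the function above (the provider
# name "pvdr" is hypothetical; the literal file names come from
# constants.ES_SCRIPTS and constants.ES_PARAMETERS_FILE):
#
#   <one of pathutils.ES_SEARCH_PATH>/pvdr/
#     one executable script per entry in constants.ES_SCRIPTS
#     the parameters file, one "name description" pair per line
#
# A usage sketch:
#
#   status, es = ExtStorageFromDisk("pvdr")
#   if not status:
#     base.ThrowError("%s" % es)  # es holds the diagnostic message

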
def _ExtStorageEnvironment(unique_id, ext_params,
                           size=None, grow=None, metadata=None):
  """Calculate the environment for an External Storage script.

  @type unique_id: tuple (driver, vol_name)
  @param unique_id: ExtStorage pool and name of the Volume
  @type ext_params: dict
  @param ext_params: the EXT parameters
  @type size: string
  @param size: size of the Volume (in mebibytes)
  @type grow: string
  @param grow: new size of Volume after grow (in mebibytes)
  @type metadata: string
  @param metadata: metadata info of the Volume
  @rtype: dict
  @return: dict of environment variables

  """
  vol_name = unique_id[1]

  result = {}
  result["VOL_NAME"] = vol_name

  # EXT params
  for pname, pvalue in ext_params.items():
    result["EXTP_%s" % pname.upper()] = str(pvalue)

  if size is not None:
    result["VOL_SIZE"] = size

  if grow is not None:
    result["VOL_NEW_SIZE"] = grow

  if metadata is not None:
    result["VOL_METADATA"] = metadata

  return result


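# Worked example for the function above (argument values are made up):
#
#   _ExtStorageEnvironment(("pvdr", "vol-0001"), {"pool": "fast"},
#                          size="1024", grow="2048", metadata="instance1")
#
# returns:
#
#   {"VOL_NAME": "vol-0001", "EXTP_POOL": "fast", "VOL_SIZE": "1024",
#    "VOL_NEW_SIZE": "2048", "VOL_METADATA": "instance1"}

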
def _VolumeLogName(kind, es_name, volume):
  """Compute the ExtStorage log filename for a given Volume and operation.

  @type kind: string
  @param kind: the operation type (e.g. create, remove etc.)
  @type es_name: string
  @param es_name: the ExtStorage name
  @type volume: string
  @param volume: the name of the Volume inside the External Storage

  """
  # Check if the extstorage log dir is a valid dir
  if not os.path.isdir(pathutils.LOG_ES_DIR):
    base.ThrowError("Cannot find log directory: %s", pathutils.LOG_ES_DIR)

  # TODO: Use tempfile.mkstemp to create unique filename
  basename = ("%s-%s-%s-%s.log" %
              (kind, es_name, volume, utils.TimestampForFilename()))
  return utils.PathJoin(pathutils.LOG_ES_DIR, basename)


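# Example of the resulting path (illustrative; the exact timestamp portion
# is whatever utils.TimestampForFilename() produces):
#
#   _VolumeLogName("create", "pvdr", "vol-0001")
#     -> <pathutils.LOG_ES_DIR>/create-pvdr-vol-0001-<timestamp>.log

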
DEV_MAP = {
  constants.LD_LV: LogicalVolume,
  constants.LD_DRBD8: drbd.DRBD8Dev,
  constants.LD_BLOCKDEV: PersistentBlockDevice,
  constants.LD_RBD: RADOSBlockDevice,
  constants.LD_EXT: ExtStorageDevice,
  }

if constants.ENABLE_FILE_STORAGE or constants.ENABLE_SHARED_FILE_STORAGE:
  DEV_MAP[constants.LD_FILE] = FileStorage


def _VerifyDiskType(dev_type):
  if dev_type not in DEV_MAP:
    raise errors.ProgrammerError("Invalid block device type '%s'" % dev_type)


def _VerifyDiskParams(disk):
  """Verifies that all disk parameters are set.

  """
  missing = set(constants.DISK_LD_DEFAULTS[disk.dev_type]) - set(disk.params)
  if missing:
    raise errors.ProgrammerError("Block device is missing disk parameters: %s" %
                                 missing)


def FindDevice(disk, children):
  """Search for an existing, assembled device.

  This will succeed only if the device exists and is assembled, but it
  does not do any actions in order to activate the device.

  @type disk: L{objects.Disk}
  @param disk: the disk object to find
  @type children: list of L{bdev.BlockDev}
  @param children: the list of block devices that are children of the device
                  represented by the disk parameter

  """
  _VerifyDiskType(disk.dev_type)
  device = DEV_MAP[disk.dev_type](disk.physical_id, children, disk.size,
                                  disk.params)
  if not device.attached:
    return None
  return device


def Assemble(disk, children):
  """Try to attach or assemble an existing device.

  This will attach to or assemble the device, as needed, to bring it
  fully up. It must be safe to run on already-assembled devices.

  @type disk: L{objects.Disk}
  @param disk: the disk object to assemble
  @type children: list of L{bdev.BlockDev}
  @param children: the list of block devices that are children of the device
                  represented by the disk parameter

  """
  _VerifyDiskType(disk.dev_type)
  _VerifyDiskParams(disk)
  device = DEV_MAP[disk.dev_type](disk.physical_id, children, disk.size,
                                  disk.params)
  device.Assemble()
  return device


def Create(disk, children, excl_stor):
  """Create a device.

  @type disk: L{objects.Disk}
  @param disk: the disk object to create
  @type children: list of L{bdev.BlockDev}
  @param children: the list of block devices that are children of the device
                  represented by the disk parameter
  @type excl_stor: boolean
  @param excl_stor: Whether exclusive_storage is active
  @rtype: L{bdev.BlockDev}
  @return: the created device, or C{None} in case of an error

  """
  _VerifyDiskType(disk.dev_type)
  _VerifyDiskParams(disk)
  device = DEV_MAP[disk.dev_type].Create(disk.physical_id, children, disk.size,
                                         disk.spindles, disk.params, excl_stor)
  return device


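# Dispatch sketch for the three module-level entry points (the disk object
# below is hypothetical): given an objects.Disk whose dev_type is LD_EXT,
# Create(), Assemble() and FindDevice() all look up ExtStorageDevice in
# DEV_MAP and delegate to it, e.g.:
#
#   dev = Create(disk, children=[], excl_stor=False)  # provisions the volume
#   dev = Assemble(disk, children=[])                 # maps/activates it
#   dev = FindDevice(disk, children=[])               # None if not attached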