lib/storage/bdev.py @ 7c848a6a

#
#

# Copyright (C) 2006, 2007, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Block device abstraction"""

import re
import errno
import stat
import os
import logging
import math

from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import objects
from ganeti import compat
from ganeti import pathutils
from ganeti import serializer
from ganeti.storage import drbd
from ganeti.storage import base


class RbdShowmappedJsonError(Exception):
  """`rbd showmapped' JSON formatting error Exception class.

  """
  pass


def _CheckResult(result):
  """Throws an error if the given result is a failed one.

  @param result: result from RunCmd

  """
  if result.failed:
    base.ThrowError("Command: %s error: %s - %s",
                    result.cmd, result.fail_reason, result.output)


def _GetForbiddenFileStoragePaths():
  """Builds a list of path prefixes which shouldn't be used for file storage.

  @rtype: frozenset

  """
  paths = set([
    "/boot",
    "/dev",
    "/etc",
    "/home",
    "/proc",
    "/root",
    "/sys",
    ])

  for prefix in ["", "/usr", "/usr/local"]:
    paths.update(map(lambda s: "%s/%s" % (prefix, s),
                     ["bin", "lib", "lib32", "lib64", "sbin"]))

  return compat.UniqueFrozenset(map(os.path.normpath, paths))


def _ComputeWrongFileStoragePaths(paths,
                                  _forbidden=_GetForbiddenFileStoragePaths()):
  """Cross-checks a list of paths for prefixes considered bad.

  Some paths, e.g. "/bin", should not be used for file storage.

  @type paths: list
  @param paths: List of paths to be checked
  @rtype: list
  @return: Sorted list of paths for which the user should be warned

  """
  def _Check(path):
    return (not os.path.isabs(path) or
            path in _forbidden or
            filter(lambda p: utils.IsBelowDir(p, path), _forbidden))

  return utils.NiceSort(filter(_Check, map(os.path.normpath, paths)))


def ComputeWrongFileStoragePaths(_filename=pathutils.FILE_STORAGE_PATHS_FILE):
  """Returns a list of file storage paths whose prefix is considered bad.

  See L{_ComputeWrongFileStoragePaths}.

  """
  return _ComputeWrongFileStoragePaths(_LoadAllowedFileStoragePaths(_filename))


def _CheckFileStoragePath(path, allowed):
  """Checks if a path is in a list of allowed paths for file storage.

  @type path: string
  @param path: Path to check
  @type allowed: list
  @param allowed: List of allowed paths
  @raise errors.FileStoragePathError: If the path is not allowed

  """
  if not os.path.isabs(path):
    raise errors.FileStoragePathError("File storage path must be absolute,"
                                      " got '%s'" % path)

  for i in allowed:
    if not os.path.isabs(i):
      logging.info("Ignoring relative path '%s' for file storage", i)
      continue

    if utils.IsBelowDir(i, path):
      break
  else:
    raise errors.FileStoragePathError("Path '%s' is not acceptable for file"
                                      " storage" % path)


def _LoadAllowedFileStoragePaths(filename):
  """Loads file containing allowed file storage paths.

  @rtype: list
  @return: List of allowed paths (can be an empty list)

  """
  try:
    contents = utils.ReadFile(filename)
  except EnvironmentError:
    return []
  else:
    return utils.FilterEmptyLinesAndComments(contents)


def CheckFileStoragePath(path, _filename=pathutils.FILE_STORAGE_PATHS_FILE):
  """Checks if a path is allowed for file storage.

  @type path: string
  @param path: Path to check
  @raise errors.FileStoragePathError: If the path is not allowed

  """
  allowed = _LoadAllowedFileStoragePaths(_filename)

  if _ComputeWrongFileStoragePaths([path]):
    raise errors.FileStoragePathError("Path '%s' uses a forbidden prefix" %
                                      path)

  _CheckFileStoragePath(path, allowed)
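
# Typical use of the checks above (illustrative only): callers pass a
# candidate path and handle the error, e.g.
#
#   try:
#     CheckFileStoragePath("/srv/ganeti/file-storage/disk0")
#   except errors.FileStoragePathError, err:
#     logging.error("Invalid file storage path: %s", err)
#
# The path must be absolute, must not use a forbidden prefix (see
# _GetForbiddenFileStoragePaths) and must lie below one of the entries of
# the allowed-paths file.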


class LogicalVolume(base.BlockDev):
  """Logical Volume block device.

  """
  _VALID_NAME_RE = re.compile("^[a-zA-Z0-9+_.-]*$")
  _PARSE_PV_DEV_RE = re.compile("^([^ ()]+)\([0-9]+\)$")
  _INVALID_NAMES = compat.UniqueFrozenset([".", "..", "snapshot", "pvmove"])
  _INVALID_SUBSTRINGS = compat.UniqueFrozenset(["_mlog", "_mimage"])

  def __init__(self, unique_id, children, size, params):
    """Attaches to a LV device.

    The unique_id is a tuple (vg_name, lv_name)

    """
    super(LogicalVolume, self).__init__(unique_id, children, size, params)
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
      raise ValueError("Invalid configuration data %s" % str(unique_id))
    self._vg_name, self._lv_name = unique_id
    self._ValidateName(self._vg_name)
    self._ValidateName(self._lv_name)
    self.dev_path = utils.PathJoin("/dev", self._vg_name, self._lv_name)
    self._degraded = True
    self.major = self.minor = self.pe_size = self.stripe_count = None
    self.pv_names = None
    self.Attach()

  @staticmethod
  def _GetStdPvSize(pvs_info):
    """Return the standard PV size (used with exclusive storage).

    @param pvs_info: list of objects.LvmPvInfo, cannot be empty
    @rtype: float
    @return: size in MiB

    """
    assert len(pvs_info) > 0
    smallest = min([pv.size for pv in pvs_info])
    return smallest / (1 + constants.PART_MARGIN + constants.PART_RESERVED)

  @staticmethod
  def _ComputeNumPvs(size, pvs_info):
    """Compute the number of PVs needed for an LV (with exclusive storage).

    @type size: float
    @param size: LV size in MiB
    @param pvs_info: list of objects.LvmPvInfo, cannot be empty
    @rtype: integer
    @return: number of PVs needed
    """
    assert len(pvs_info) > 0
    pv_size = float(LogicalVolume._GetStdPvSize(pvs_info))
    return int(math.ceil(float(size) / pv_size))
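
  # An example of the PV sizing above, with illustrative figures: if the
  # smallest PV is 10240 MiB and, for the sake of the example,
  # constants.PART_MARGIN is 0.01 and constants.PART_RESERVED is 0.02, then
  # the standard PV size is 10240 / 1.03 ~= 9941.7 MiB and a 20000 MiB LV
  # needs ceil(20000 / 9941.7) = 3 PVs.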

  @staticmethod
  def _GetEmptyPvNames(pvs_info, max_pvs=None):
    """Return a list of empty PVs, by name.

    """
    empty_pvs = filter(objects.LvmPvInfo.IsEmpty, pvs_info)
    if max_pvs is not None:
      empty_pvs = empty_pvs[:max_pvs]
    return map((lambda pv: pv.name), empty_pvs)

  @classmethod
  def Create(cls, unique_id, children, size, spindles, params, excl_stor):
    """Create a new logical volume.

    """
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
      raise errors.ProgrammerError("Invalid configuration data %s" %
                                   str(unique_id))
    vg_name, lv_name = unique_id
    cls._ValidateName(vg_name)
    cls._ValidateName(lv_name)
    pvs_info = cls.GetPVInfo([vg_name])
    if not pvs_info:
      if excl_stor:
        msg = "No (empty) PVs found"
      else:
        msg = "Can't compute PV info for vg %s" % vg_name
      base.ThrowError(msg)
    pvs_info.sort(key=(lambda pv: pv.free), reverse=True)

    pvlist = [pv.name for pv in pvs_info]
    if compat.any(":" in v for v in pvlist):
      base.ThrowError("Some of your PVs have the invalid character ':' in their"
                      " name, this is not supported - please filter them out"
                      " in lvm.conf using either 'filter' or 'preferred_names'")

    current_pvs = len(pvlist)
    desired_stripes = params[constants.LDP_STRIPES]
    stripes = min(current_pvs, desired_stripes)

    if excl_stor:
      if spindles is None:
        base.ThrowError("Unspecified number of spindles: this is required"
                        " when exclusive storage is enabled, try running"
                        " gnt-cluster repair-disk-sizes")
      (err_msgs, _) = utils.LvmExclusiveCheckNodePvs(pvs_info)
      if err_msgs:
        for m in err_msgs:
          logging.warning(m)
      req_pvs = cls._ComputeNumPvs(size, pvs_info)
      if spindles < req_pvs:
        base.ThrowError("Requested number of spindles (%s) is not enough for"
                        " a disk of %d MB (at least %d spindles needed)",
                        spindles, size, req_pvs)
      else:
        req_pvs = spindles
      pvlist = cls._GetEmptyPvNames(pvs_info, req_pvs)
      current_pvs = len(pvlist)
      if current_pvs < req_pvs:
        base.ThrowError("Not enough empty PVs (spindles) to create a disk of %d"
                        " MB: %d available, %d needed",
                        size, current_pvs, req_pvs)
      assert current_pvs == len(pvlist)
      if stripes > current_pvs:
        # No warning issued for this, as it's no surprise
        stripes = current_pvs

    else:
      if stripes < desired_stripes:
        logging.warning("Could not use %d stripes for VG %s, as only %d PVs are"
                        " available.", desired_stripes, vg_name, current_pvs)
      free_size = sum([pv.free for pv in pvs_info])
      # The size constraint should have been checked from the master before
      # calling the create function.
      if free_size < size:
        base.ThrowError("Not enough free space: required %s,"
                        " available %s", size, free_size)

    # If the free space is not well distributed, we won't be able to
    # create an optimally-striped volume; in that case, we want to try
    # with N, N-1, ..., 2, and finally 1 (non-stripped) number of
    # stripes
    cmd = ["lvcreate", "-L%dm" % size, "-n%s" % lv_name]
    for stripes_arg in range(stripes, 0, -1):
      result = utils.RunCmd(cmd + ["-i%d" % stripes_arg] + [vg_name] + pvlist)
      if not result.failed:
        break
    if result.failed:
      base.ThrowError("LV create failed (%s): %s",
                      result.fail_reason, result.output)
    return LogicalVolume(unique_id, children, size, params)

  @staticmethod
  def _GetVolumeInfo(lvm_cmd, fields):
    """Returns LVM Volume info using lvm_cmd.

    @param lvm_cmd: Should be one of "pvs", "vgs" or "lvs"
    @param fields: Fields to return
    @return: A list of lists, each with the parsed fields

    """
    if not fields:
      raise errors.ProgrammerError("No fields specified")

    sep = "|"
    cmd = [lvm_cmd, "--noheadings", "--nosuffix", "--units=m", "--unbuffered",
           "--separator=%s" % sep, "-o%s" % ",".join(fields)]

    result = utils.RunCmd(cmd)
    if result.failed:
      raise errors.CommandError("Can't get the volume information: %s - %s" %
                                (result.fail_reason, result.output))

    data = []
    for line in result.stdout.splitlines():
      splitted_fields = line.strip().split(sep)

      if len(fields) != len(splitted_fields):
        raise errors.CommandError("Can't parse %s output: line '%s'" %
                                  (lvm_cmd, line))

      data.append(splitted_fields)

    return data
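
  # For reference, the command built above looks roughly like:
  #   pvs --noheadings --nosuffix --units=m --unbuffered --separator=| \
  #     -opv_name,vg_name,pv_free,pv_attr,pv_size
  # and each output line is split on the separator into one value per
  # requested field.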

  @classmethod
  def GetPVInfo(cls, vg_names, filter_allocatable=True, include_lvs=False):
    """Get the free space info for PVs in a volume group.

    @param vg_names: list of volume group names, if empty all will be returned
    @param filter_allocatable: whether to skip over unallocatable PVs
    @param include_lvs: whether to include a list of LVs hosted on each PV

    @rtype: list
    @return: list of objects.LvmPvInfo objects

    """
    # We request "lv_name" field only if we care about LVs, so we don't get
    # a long list of entries with many duplicates unless we really have to.
    # The duplicate "pv_name" field will be ignored.
    if include_lvs:
      lvfield = "lv_name"
    else:
      lvfield = "pv_name"
    try:
      info = cls._GetVolumeInfo("pvs", ["pv_name", "vg_name", "pv_free",
                                        "pv_attr", "pv_size", lvfield])
    except errors.GenericError, err:
      logging.error("Can't get PV information: %s", err)
      return None

    # When asked for LVs, "pvs" may return multiple entries for the same PV-LV
    # pair. We sort entries by PV name and then LV name, so it's easy to weed
    # out duplicates.
    if include_lvs:
      info.sort(key=(lambda i: (i[0], i[5])))
    data = []
    lastpvi = None
    for (pv_name, vg_name, pv_free, pv_attr, pv_size, lv_name) in info:
      # (possibly) skip over pvs which are not allocatable
      if filter_allocatable and pv_attr[0] != "a":
        continue
      # (possibly) skip over pvs which are not in the right volume group(s)
      if vg_names and vg_name not in vg_names:
        continue
      # Beware of duplicates (check before inserting)
      if lastpvi and lastpvi.name == pv_name:
        if include_lvs and lv_name:
          if not lastpvi.lv_list or lastpvi.lv_list[-1] != lv_name:
            lastpvi.lv_list.append(lv_name)
      else:
        if include_lvs and lv_name:
          lvl = [lv_name]
        else:
          lvl = []
        lastpvi = objects.LvmPvInfo(name=pv_name, vg_name=vg_name,
                                    size=float(pv_size), free=float(pv_free),
                                    attributes=pv_attr, lv_list=lvl)
        data.append(lastpvi)

    return data

  @classmethod
  def _GetExclusiveStorageVgFree(cls, vg_name):
    """Return the free disk space in the given VG, in exclusive storage mode.

    @type vg_name: string
    @param vg_name: VG name
    @rtype: float
    @return: free space in MiB
    """
    pvs_info = cls.GetPVInfo([vg_name])
    if not pvs_info:
      return 0.0
    pv_size = cls._GetStdPvSize(pvs_info)
    num_pvs = len(cls._GetEmptyPvNames(pvs_info))
    return pv_size * num_pvs

  @classmethod
  def GetVGInfo(cls, vg_names, excl_stor, filter_readonly=True):
    """Get the free space info for specific VGs.

    @param vg_names: list of volume group names, if empty all will be returned
    @param excl_stor: whether exclusive_storage is enabled
    @param filter_readonly: whether to skip over readonly VGs

    @rtype: list
    @return: list of tuples (free_space, total_size, name) with free_space in
             MiB

    """
    try:
      info = cls._GetVolumeInfo("vgs", ["vg_name", "vg_free", "vg_attr",
                                        "vg_size"])
    except errors.GenericError, err:
      logging.error("Can't get VG information: %s", err)
      return None

    data = []
    for vg_name, vg_free, vg_attr, vg_size in info:
      # (possibly) skip over vgs which are not writable
      if filter_readonly and vg_attr[0] == "r":
        continue
      # (possibly) skip over vgs which are not in the right volume group(s)
      if vg_names and vg_name not in vg_names:
        continue
      # Exclusive storage needs a different concept of free space
      if excl_stor:
        es_free = cls._GetExclusiveStorageVgFree(vg_name)
        assert es_free <= vg_free
        vg_free = es_free
      data.append((float(vg_free), float(vg_size), vg_name))

    return data

  @classmethod
  def _ValidateName(cls, name):
    """Validates that a given name is valid as VG or LV name.

    The list of valid characters and restricted names is taken out of
    the lvm(8) manpage, with the simplification that we enforce both
    VG and LV restrictions on the names.

    """
    if (not cls._VALID_NAME_RE.match(name) or
        name in cls._INVALID_NAMES or
        compat.any(substring in name for substring in cls._INVALID_SUBSTRINGS)):
      base.ThrowError("Invalid LVM name '%s'", name)

  def Remove(self):
    """Remove this logical volume.

    """
    if not self.minor and not self.Attach():
      # the LV does not exist
      return
    result = utils.RunCmd(["lvremove", "-f", "%s/%s" %
                           (self._vg_name, self._lv_name)])
    if result.failed:
      base.ThrowError("Can't lvremove: %s - %s",
                      result.fail_reason, result.output)

  def Rename(self, new_id):
    """Rename this logical volume.

    """
    if not isinstance(new_id, (tuple, list)) or len(new_id) != 2:
      raise errors.ProgrammerError("Invalid new logical id '%s'" % new_id)
    new_vg, new_name = new_id
    if new_vg != self._vg_name:
      raise errors.ProgrammerError("Can't move a logical volume across"
                                   " volume groups (from %s to %s)" %
                                   (self._vg_name, new_vg))
    result = utils.RunCmd(["lvrename", new_vg, self._lv_name, new_name])
    if result.failed:
      base.ThrowError("Failed to rename the logical volume: %s", result.output)
    self._lv_name = new_name
    self.dev_path = utils.PathJoin("/dev", self._vg_name, self._lv_name)

  @classmethod
  def _ParseLvInfoLine(cls, line, sep):
    """Parse one line of the lvs output used in L{_GetLvInfo}.

    """
    elems = line.strip().rstrip(sep).split(sep)
    if len(elems) != 6:
      base.ThrowError("Can't parse LVS output, len(%s) != 6", str(elems))

    (status, major, minor, pe_size, stripes, pvs) = elems
    if len(status) < 6:
      base.ThrowError("lvs lv_attr is not at least 6 characters (%s)", status)

    try:
      major = int(major)
      minor = int(minor)
    except (TypeError, ValueError), err:
      base.ThrowError("lvs major/minor cannot be parsed: %s", str(err))

    try:
      pe_size = int(float(pe_size))
    except (TypeError, ValueError), err:
      base.ThrowError("Can't parse vg extent size: %s", err)

    try:
      stripes = int(stripes)
    except (TypeError, ValueError), err:
      base.ThrowError("Can't parse the number of stripes: %s", err)

    pv_names = []
    for pv in pvs.split(","):
      m = re.match(cls._PARSE_PV_DEV_RE, pv)
      if not m:
        base.ThrowError("Can't parse this device list: %s", pvs)
      pv_names.append(m.group(1))
    assert len(pv_names) > 0

    return (status, major, minor, pe_size, stripes, pv_names)

  @classmethod
  def _GetLvInfo(cls, dev_path, _run_cmd=utils.RunCmd):
    """Get info about the given existing LV to be used.

    """
    sep = "|"
    result = _run_cmd(["lvs", "--noheadings", "--separator=%s" % sep,
                       "--units=k", "--nosuffix",
                       "-olv_attr,lv_kernel_major,lv_kernel_minor,"
                       "vg_extent_size,stripes,devices", dev_path])
    if result.failed:
      base.ThrowError("Can't find LV %s: %s, %s",
                      dev_path, result.fail_reason, result.output)
    # the output can (and will) have multiple lines for multi-segment
    # LVs, as the 'stripes' parameter is a segment one, so we take
    # only the last entry, which is the one we're interested in; note
    # that with LVM2 anyway the 'stripes' value must be constant
    # across segments, so this is a no-op actually
    out = result.stdout.splitlines()
    if not out: # totally empty result? splitlines() returns at least
                # one line for any non-empty string
      base.ThrowError("Can't parse LVS output, no lines? Got '%s'", str(out))
    pv_names = set()
    for line in out:
      (status, major, minor, pe_size, stripes, more_pvs) = \
        cls._ParseLvInfoLine(line, sep)
      pv_names.update(more_pvs)
    return (status, major, minor, pe_size, stripes, pv_names)
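
  # A typical line fed to _ParseLvInfoLine would look like (illustrative):
  #   -wi-ao|253|0|4096.00|1|/dev/sda3(0),/dev/sdb3(100)
  # i.e. lv_attr, kernel major/minor, extent size in KiB, stripe count and a
  # comma-separated device(extent) list, matched by _PARSE_PV_DEV_RE.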

  def Attach(self):
    """Attach to an existing LV.

    This method will try to see if an existing and active LV exists
    which matches our name. If so, its major/minor will be
    recorded.

    """
    self.attached = False
    try:
      (status, major, minor, pe_size, stripes, pv_names) = \
        self._GetLvInfo(self.dev_path)
    except errors.BlockDeviceError:
      return False

    self.major = major
    self.minor = minor
    self.pe_size = pe_size
    self.stripe_count = stripes
    self._degraded = status[0] == "v" # virtual volume, i.e. doesn't have
                                      # backing storage
    self.pv_names = pv_names
    self.attached = True
    return True

  def Assemble(self):
    """Assemble the device.

    We always run `lvchange -ay` on the LV to ensure it's active before
    use, as there were cases when xenvg was not active after boot
    (also possibly after disk issues).

    """
    result = utils.RunCmd(["lvchange", "-ay", self.dev_path])
    if result.failed:
      base.ThrowError("Can't activate lv %s: %s", self.dev_path, result.output)

  def Shutdown(self):
    """Shutdown the device.

    This is a no-op for the LV device type, as we don't deactivate the
    volumes on shutdown.

    """
    pass

  def GetSyncStatus(self):
    """Returns the sync status of the device.

    If this device is a mirroring device, this function returns the
    status of the mirror.

    For logical volumes, sync_percent and estimated_time are always
    None (no recovery in progress, as we don't handle the mirrored LV
    case). The is_degraded parameter is the inverse of the ldisk
    parameter.

    For the ldisk parameter, we check if the logical volume has the
    'virtual' type, which means it's not backed by existing storage
    anymore (reads from it return I/O errors). This happens after a
    physical disk failure and subsequent 'vgreduce --removemissing' on
    the volume group.

    The status was already read in Attach, so we just return it.

    @rtype: objects.BlockDevStatus

    """
    if self._degraded:
      ldisk_status = constants.LDS_FAULTY
    else:
      ldisk_status = constants.LDS_OKAY

    return objects.BlockDevStatus(dev_path=self.dev_path,
                                  major=self.major,
                                  minor=self.minor,
                                  sync_percent=None,
                                  estimated_time=None,
                                  is_degraded=self._degraded,
                                  ldisk_status=ldisk_status)

  def Open(self, force=False):
    """Make the device ready for I/O.

    This is a no-op for the LV device type.

    """
    pass

  def Close(self):
    """Notifies that the device will no longer be used for I/O.

    This is a no-op for the LV device type.

    """
    pass

  def Snapshot(self, size):
    """Create a snapshot copy of an lvm block device.

    @returns: tuple (vg, lv)

    """
    snap_name = self._lv_name + ".snap"

    # remove existing snapshot if found
    snap = LogicalVolume((self._vg_name, snap_name), None, size, self.params)
    base.IgnoreError(snap.Remove)

    vg_info = self.GetVGInfo([self._vg_name], False)
    if not vg_info:
      base.ThrowError("Can't compute VG info for vg %s", self._vg_name)
    free_size, _, _ = vg_info[0]
    if free_size < size:
      base.ThrowError("Not enough free space: required %s,"
                      " available %s", size, free_size)

    _CheckResult(utils.RunCmd(["lvcreate", "-L%dm" % size, "-s",
                               "-n%s" % snap_name, self.dev_path]))

    return (self._vg_name, snap_name)

  def _RemoveOldInfo(self):
    """Try to remove old tags from the lv.

    """
    result = utils.RunCmd(["lvs", "-o", "tags", "--noheadings", "--nosuffix",
                           self.dev_path])
    _CheckResult(result)

    raw_tags = result.stdout.strip()
    if raw_tags:
      for tag in raw_tags.split(","):
        _CheckResult(utils.RunCmd(["lvchange", "--deltag",
                                   tag.strip(), self.dev_path]))

  def SetInfo(self, text):
    """Update metadata with info text.

    """
    base.BlockDev.SetInfo(self, text)

    self._RemoveOldInfo()

    # Replace invalid characters
    text = re.sub("^[^A-Za-z0-9_+.]", "_", text)
    text = re.sub("[^-A-Za-z0-9_+.]", "_", text)

    # Only up to 128 characters are allowed
    text = text[:128]

    _CheckResult(utils.RunCmd(["lvchange", "--addtag", text, self.dev_path]))

  def Grow(self, amount, dryrun, backingstore):
    """Grow the logical volume.

    """
    if not backingstore:
      return
    if self.pe_size is None or self.stripe_count is None:
      if not self.Attach():
        base.ThrowError("Can't attach to LV during Grow()")
    full_stripe_size = self.pe_size * self.stripe_count
    # pe_size is in KB
    amount *= 1024
    rest = amount % full_stripe_size
    if rest != 0:
      amount += full_stripe_size - rest
    cmd = ["lvextend", "-L", "+%dk" % amount]
    if dryrun:
      cmd.append("--test")
    # we try multiple algorithms since the 'best' ones might not have
    # space available in the right place, but later ones might (since
    # they have less constraints); also note that only recent LVM
    # supports 'cling'
    for alloc_policy in "contiguous", "cling", "normal":
      result = utils.RunCmd(cmd + ["--alloc", alloc_policy, self.dev_path])
      if not result.failed:
        return
    base.ThrowError("Can't grow LV %s: %s", self.dev_path, result.output)
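
  # The rounding above keeps the extension a multiple of the full stripe
  # width. For example (illustrative numbers), growing by 100 MiB
  # (102400 KiB) an LV with 4096 KiB extents and 2 stripes gives
  # full_stripe_size = 8192 KiB, so the request is rounded up to 106496 KiB.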

  def GetActualSpindles(self):
    """Return the number of spindles used.

    """
    assert self.attached, "BlockDevice not attached in GetActualSpindles()"
    return len(self.pv_names)


class FileStorage(base.BlockDev):
  """File device.

  This class represents a file storage backend device.

  The unique_id for the file device is a (file_driver, file_path) tuple.

  """
  def __init__(self, unique_id, children, size, params):
    """Initializes a file device backend.

    """
    if children:
      raise errors.BlockDeviceError("Invalid setup for file device")
    super(FileStorage, self).__init__(unique_id, children, size, params)
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
      raise ValueError("Invalid configuration data %s" % str(unique_id))
    self.driver = unique_id[0]
    self.dev_path = unique_id[1]

    CheckFileStoragePath(self.dev_path)

    self.Attach()

  def Assemble(self):
    """Assemble the device.

    Checks whether the file device exists, raises BlockDeviceError otherwise.

    """
    if not os.path.exists(self.dev_path):
      base.ThrowError("File device '%s' does not exist" % self.dev_path)

  def Shutdown(self):
    """Shutdown the device.

    This is a no-op for the file type, as we don't deactivate
    the file on shutdown.

    """
    pass

  def Open(self, force=False):
    """Make the device ready for I/O.

    This is a no-op for the file type.

    """
    pass

  def Close(self):
    """Notifies that the device will no longer be used for I/O.

    This is a no-op for the file type.

    """
    pass

  def Remove(self):
    """Remove the file backing the block device.

    @rtype: boolean
    @return: True if the removal was successful

    """
    try:
      os.remove(self.dev_path)
    except OSError, err:
      if err.errno != errno.ENOENT:
        base.ThrowError("Can't remove file '%s': %s", self.dev_path, err)

  def Rename(self, new_id):
    """Renames the file.

    """
    # TODO: implement rename for file-based storage
    base.ThrowError("Rename is not supported for file-based storage")

  def Grow(self, amount, dryrun, backingstore):
    """Grow the file.

    @param amount: the amount (in mebibytes) to grow with

    """
    if not backingstore:
      return
    # Check that the file exists
    self.Assemble()
    current_size = self.GetActualSize()
    new_size = current_size + amount * 1024 * 1024
    assert new_size > current_size, "Cannot Grow with a negative amount"
    # We can't really simulate the growth
    if dryrun:
      return
    try:
      f = open(self.dev_path, "a+")
      f.truncate(new_size)
      f.close()
    except EnvironmentError, err:
      base.ThrowError("Error in file growth: %s", str(err))

  def Attach(self):
    """Attach to an existing file.

    Check if this file already exists.

    @rtype: boolean
    @return: True if file exists

    """
    self.attached = os.path.exists(self.dev_path)
    return self.attached

  def GetActualSize(self):
    """Return the actual disk size.

    @note: the device needs to be active when this is called

    """
    assert self.attached, "BlockDevice not attached in GetActualSize()"
    try:
      st = os.stat(self.dev_path)
      return st.st_size
    except OSError, err:
      base.ThrowError("Can't stat %s: %s", self.dev_path, err)

  @classmethod
  def Create(cls, unique_id, children, size, spindles, params, excl_stor):
    """Create a new file.

    @param size: the size of file in MiB

    @rtype: L{bdev.FileStorage}
    @return: an instance of FileStorage

    """
    if excl_stor:
      raise errors.ProgrammerError("FileStorage device requested with"
                                   " exclusive_storage")
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
      raise ValueError("Invalid configuration data %s" % str(unique_id))

    dev_path = unique_id[1]

    CheckFileStoragePath(dev_path)

    try:
      fd = os.open(dev_path, os.O_RDWR | os.O_CREAT | os.O_EXCL)
      f = os.fdopen(fd, "w")
      f.truncate(size * 1024 * 1024)
      f.close()
    except EnvironmentError, err:
      if err.errno == errno.EEXIST:
        base.ThrowError("File already existing: %s", dev_path)
      base.ThrowError("Error in file creation: %s", str(err))

    return FileStorage(unique_id, children, size, params)


class PersistentBlockDevice(base.BlockDev):
  """A block device with a persistent device node.

  May be either directly attached, or exposed through DM (e.g. dm-multipath).
  udev helpers are probably required to give persistent, human-friendly
  names.

  For the time being, pathnames are required to lie under /dev.

  """
  def __init__(self, unique_id, children, size, params):
    """Attaches to a static block device.

    The unique_id is a path under /dev.

    """
    super(PersistentBlockDevice, self).__init__(unique_id, children, size,
                                                params)
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
      raise ValueError("Invalid configuration data %s" % str(unique_id))
    self.dev_path = unique_id[1]
    if not os.path.realpath(self.dev_path).startswith("/dev/"):
      raise ValueError("Full path '%s' lies outside /dev" %
                       os.path.realpath(self.dev_path))
    # TODO: this is just a safety guard checking that we only deal with devices
    # we know how to handle. In the future this will be integrated with
    # external storage backends and possible values will probably be collected
    # from the cluster configuration.
    if unique_id[0] != constants.BLOCKDEV_DRIVER_MANUAL:
      raise ValueError("Got persistent block device of invalid type: %s" %
                       unique_id[0])

    self.major = self.minor = None
    self.Attach()

  @classmethod
  def Create(cls, unique_id, children, size, spindles, params, excl_stor):
    """Create a new device

    This is a noop, we only return a PersistentBlockDevice instance

    """
    if excl_stor:
      raise errors.ProgrammerError("Persistent block device requested with"
                                   " exclusive_storage")
    return PersistentBlockDevice(unique_id, children, 0, params)

  def Remove(self):
    """Remove a device

    This is a noop

    """
    pass

  def Rename(self, new_id):
    """Rename this device.

    """
    base.ThrowError("Rename is not supported for PersistentBlockDev storage")

  def Attach(self):
    """Attach to an existing block device.


    """
    self.attached = False
    try:
      st = os.stat(self.dev_path)
    except OSError, err:
      logging.error("Error stat()'ing %s: %s", self.dev_path, str(err))
      return False

    if not stat.S_ISBLK(st.st_mode):
      logging.error("%s is not a block device", self.dev_path)
      return False

    self.major = os.major(st.st_rdev)
    self.minor = os.minor(st.st_rdev)
    self.attached = True

    return True

  def Assemble(self):
    """Assemble the device.

    """
    pass

  def Shutdown(self):
    """Shutdown the device.

    """
    pass

  def Open(self, force=False):
    """Make the device ready for I/O.

    """
    pass

  def Close(self):
    """Notifies that the device will no longer be used for I/O.

    """
    pass

  def Grow(self, amount, dryrun, backingstore):
    """Grow the logical volume.

    """
    base.ThrowError("Grow is not supported for PersistentBlockDev storage")


class RADOSBlockDevice(base.BlockDev):
  """A RADOS Block Device (rbd).

  This class implements the RADOS Block Device for the backend. You need
  the rbd kernel driver, the RADOS Tools and a working RADOS cluster for
  this to be functional.

  """
  def __init__(self, unique_id, children, size, params):
    """Attaches to an rbd device.

    """
    super(RADOSBlockDevice, self).__init__(unique_id, children, size, params)
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
      raise ValueError("Invalid configuration data %s" % str(unique_id))

    self.driver, self.rbd_name = unique_id

    self.major = self.minor = None
    self.Attach()

  @classmethod
  def Create(cls, unique_id, children, size, spindles, params, excl_stor):
    """Create a new rbd device.

    Provision a new rbd volume inside a RADOS pool.

    """
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
      raise errors.ProgrammerError("Invalid configuration data %s" %
                                   str(unique_id))
    if excl_stor:
      raise errors.ProgrammerError("RBD device requested with"
                                   " exclusive_storage")
    rbd_pool = params[constants.LDP_POOL]
    rbd_name = unique_id[1]

    # Provision a new rbd volume (Image) inside the RADOS cluster.
    cmd = [constants.RBD_CMD, "create", "-p", rbd_pool,
           rbd_name, "--size", "%s" % size]
    result = utils.RunCmd(cmd)
    if result.failed:
      base.ThrowError("rbd creation failed (%s): %s",
                      result.fail_reason, result.output)

    return RADOSBlockDevice(unique_id, children, size, params)

  def Remove(self):
    """Remove the rbd device.

    """
    rbd_pool = self.params[constants.LDP_POOL]
    rbd_name = self.unique_id[1]

    if not self.minor and not self.Attach():
      # The rbd device doesn't exist.
      return

    # First shutdown the device (remove mappings).
    self.Shutdown()

    # Remove the actual Volume (Image) from the RADOS cluster.
    cmd = [constants.RBD_CMD, "rm", "-p", rbd_pool, rbd_name]
    result = utils.RunCmd(cmd)
    if result.failed:
      base.ThrowError("Can't remove Volume from cluster with rbd rm: %s - %s",
                      result.fail_reason, result.output)

  def Rename(self, new_id):
    """Rename this device.

    """
    pass

  def Attach(self):
    """Attach to an existing rbd device.

    This method maps the rbd volume that matches our name with
    an rbd device and then attaches to this device.

    """
    self.attached = False

    # Map the rbd volume to a block device under /dev
    self.dev_path = self._MapVolumeToBlockdev(self.unique_id)

    try:
      st = os.stat(self.dev_path)
    except OSError, err:
      logging.error("Error stat()'ing %s: %s", self.dev_path, str(err))
      return False

    if not stat.S_ISBLK(st.st_mode):
      logging.error("%s is not a block device", self.dev_path)
      return False

    self.major = os.major(st.st_rdev)
    self.minor = os.minor(st.st_rdev)
    self.attached = True

    return True

  def _MapVolumeToBlockdev(self, unique_id):
    """Maps existing rbd volumes to block devices.

    This method should be idempotent if the mapping already exists.

    @rtype: string
    @return: the block device path that corresponds to the volume

    """
    pool = self.params[constants.LDP_POOL]
    name = unique_id[1]

    # Check if the mapping already exists.
    rbd_dev = self._VolumeToBlockdev(pool, name)
    if rbd_dev:
      # The mapping exists. Return it.
      return rbd_dev

    # The mapping doesn't exist. Create it.
    map_cmd = [constants.RBD_CMD, "map", "-p", pool, name]
    result = utils.RunCmd(map_cmd)
    if result.failed:
      base.ThrowError("rbd map failed (%s): %s",
                      result.fail_reason, result.output)

    # Find the corresponding rbd device.
    rbd_dev = self._VolumeToBlockdev(pool, name)
    if not rbd_dev:
      base.ThrowError("rbd map succeeded, but could not find the rbd block"
                      " device in output of showmapped, for volume: %s", name)

    # The device was successfully mapped. Return it.
    return rbd_dev

  @classmethod
  def _VolumeToBlockdev(cls, pool, volume_name):
    """Do the 'volume name'-to-'rbd block device' resolving.

    @type pool: string
    @param pool: RADOS pool to use
    @type volume_name: string
    @param volume_name: the name of the volume whose device we search for
    @rtype: string or None
    @return: block device path if the volume is mapped, else None

    """
    try:
      # Newer versions of the rbd tool support json output formatting. Use it
      # if available.
      showmap_cmd = [
        constants.RBD_CMD,
        "showmapped",
        "-p",
        pool,
        "--format",
        "json"
        ]
      result = utils.RunCmd(showmap_cmd)
      if result.failed:
        logging.error("rbd JSON output formatting returned error (%s): %s,"
                      " falling back to plain output parsing",
                      result.fail_reason, result.output)
        raise RbdShowmappedJsonError

      return cls._ParseRbdShowmappedJson(result.output, volume_name)
    except RbdShowmappedJsonError:
      # For older versions of rbd, we have to parse the plain / text output
      # manually.
      showmap_cmd = [constants.RBD_CMD, "showmapped", "-p", pool]
      result = utils.RunCmd(showmap_cmd)
      if result.failed:
        base.ThrowError("rbd showmapped failed (%s): %s",
                        result.fail_reason, result.output)

      return cls._ParseRbdShowmappedPlain(result.output, volume_name)

  @staticmethod
  def _ParseRbdShowmappedJson(output, volume_name):
    """Parse the json output of `rbd showmapped'.

    This method parses the json output of `rbd showmapped' and returns the rbd
    block device path (e.g. /dev/rbd0) that matches the given rbd volume.

    @type output: string
    @param output: the json output of `rbd showmapped'
    @type volume_name: string
    @param volume_name: the name of the volume whose device we search for
    @rtype: string or None
    @return: block device path if the volume is mapped, else None

    """
    try:
      devices = serializer.LoadJson(output)
    except ValueError, err:
      base.ThrowError("Unable to parse JSON data: %s" % err)

    rbd_dev = None
    for d in devices.values(): # pylint: disable=E1103
      try:
        name = d["name"]
      except KeyError:
        base.ThrowError("'name' key missing from json object %s", devices)

      if name == volume_name:
        if rbd_dev is not None:
          base.ThrowError("rbd volume %s is mapped more than once", volume_name)

        rbd_dev = d["device"]

    return rbd_dev

  @staticmethod
  def _ParseRbdShowmappedPlain(output, volume_name):
    """Parse the (plain / text) output of `rbd showmapped'.

    This method parses the output of `rbd showmapped' and returns
    the rbd block device path (e.g. /dev/rbd0) that matches the
    given rbd volume.

    @type output: string
    @param output: the plain text output of `rbd showmapped'
    @type volume_name: string
    @param volume_name: the name of the volume whose device we search for
    @rtype: string or None
    @return: block device path if the volume is mapped, else None

    """
    allfields = 5
    volumefield = 2
    devicefield = 4

    lines = output.splitlines()

    # Try parsing the new output format (ceph >= 0.55).
    splitted_lines = map(lambda l: l.split(), lines)

    # Check for empty output.
    if not splitted_lines:
      return None

    # Check showmapped output, to determine number of fields.
    field_cnt = len(splitted_lines[0])
    if field_cnt != allfields:
      # Parsing the new format failed. Fallback to parsing the old output
      # format (< 0.55).
      splitted_lines = map(lambda l: l.split("\t"), lines)
      if field_cnt != allfields:
        base.ThrowError("Cannot parse rbd showmapped output: expected %s"
                        " fields, found %s", allfields, field_cnt)

    matched_lines = \
      filter(lambda l: len(l) == allfields and l[volumefield] == volume_name,
             splitted_lines)

    if len(matched_lines) > 1:
      base.ThrowError("rbd volume %s mapped more than once", volume_name)

    if matched_lines:
      # rbd block device found. Return it.
      rbd_dev = matched_lines[0][devicefield]
      return rbd_dev

    # The given volume is not mapped.
    return None
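
  # For the plain-text path above, `rbd showmapped' output typically looks
  # like (illustrative):
  #   id pool image snap device
  #   0  rbd  vol1  -    /dev/rbd0
  # i.e. five whitespace-separated fields, with the volume name in field 2
  # and the device path in field 4 (0-based), as used by the code above.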

  def Assemble(self):
    """Assemble the device.

    """
    pass

  def Shutdown(self):
    """Shutdown the device.

    """
    if not self.minor and not self.Attach():
      # The rbd device doesn't exist.
      return

    # Unmap the block device from the Volume.
    self._UnmapVolumeFromBlockdev(self.unique_id)

    self.minor = None
    self.dev_path = None

  def _UnmapVolumeFromBlockdev(self, unique_id):
    """Unmaps the rbd device from the Volume it is mapped.

    Unmaps the rbd device from the Volume it was previously mapped to.
    This method should be idempotent if the Volume isn't mapped.

    """
    pool = self.params[constants.LDP_POOL]
    name = unique_id[1]

    # Check if the mapping already exists.
    rbd_dev = self._VolumeToBlockdev(pool, name)

    if rbd_dev:
      # The mapping exists. Unmap the rbd device.
      unmap_cmd = [constants.RBD_CMD, "unmap", "%s" % rbd_dev]
      result = utils.RunCmd(unmap_cmd)
      if result.failed:
        base.ThrowError("rbd unmap failed (%s): %s",
                        result.fail_reason, result.output)

  def Open(self, force=False):
    """Make the device ready for I/O.

    """
    pass

  def Close(self):
    """Notifies that the device will no longer be used for I/O.

    """
    pass

  def Grow(self, amount, dryrun, backingstore):
    """Grow the Volume.

    @type amount: integer
    @param amount: the amount (in mebibytes) to grow with
    @type dryrun: boolean
    @param dryrun: whether to execute the operation in simulation mode
        only, without actually increasing the size

    """
    if not backingstore:
      return
    if not self.Attach():
      base.ThrowError("Can't attach to rbd device during Grow()")

    if dryrun:
      # the rbd tool does not support dry runs of resize operations.
      # Since rbd volumes are thinly provisioned, we assume
      # there is always enough free space for the operation.
      return

    rbd_pool = self.params[constants.LDP_POOL]
    rbd_name = self.unique_id[1]
    new_size = self.size + amount

    # Resize the rbd volume (Image) inside the RADOS cluster.
    cmd = [constants.RBD_CMD, "resize", "-p", rbd_pool,
           rbd_name, "--size", "%s" % new_size]
    result = utils.RunCmd(cmd)
    if result.failed:
      base.ThrowError("rbd resize failed (%s): %s",
                      result.fail_reason, result.output)


class ExtStorageDevice(base.BlockDev):
  """A block device provided by an ExtStorage Provider.

  This class implements the External Storage Interface, which means
  handling of the externally provided block devices.

  """
  def __init__(self, unique_id, children, size, params):
    """Attaches to an extstorage block device.

    """
    super(ExtStorageDevice, self).__init__(unique_id, children, size, params)
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
      raise ValueError("Invalid configuration data %s" % str(unique_id))

    self.driver, self.vol_name = unique_id
    self.ext_params = params

    self.major = self.minor = None
    self.Attach()

  @classmethod
  def Create(cls, unique_id, children, size, spindles, params, excl_stor):
    """Create a new extstorage device.

    Provision a new volume using an extstorage provider, which will
    then be mapped to a block device.

    """
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
      raise errors.ProgrammerError("Invalid configuration data %s" %
                                   str(unique_id))
    if excl_stor:
      raise errors.ProgrammerError("extstorage device requested with"
                                   " exclusive_storage")

    # Call the External Storage's create script,
    # to provision a new Volume inside the External Storage
    _ExtStorageAction(constants.ES_ACTION_CREATE, unique_id,
                      params, str(size))

    return ExtStorageDevice(unique_id, children, size, params)

  def Remove(self):
    """Remove the extstorage device.

    """
    if not self.minor and not self.Attach():
      # The extstorage device doesn't exist.
      return

    # First shutdown the device (remove mappings).
    self.Shutdown()

    # Call the External Storage's remove script,
    # to remove the Volume from the External Storage
    _ExtStorageAction(constants.ES_ACTION_REMOVE, self.unique_id,
                      self.ext_params)

  def Rename(self, new_id):
    """Rename this device.

    """
    pass

  def Attach(self):
    """Attach to an existing extstorage device.

    This method maps the extstorage volume that matches our name with
    a corresponding block device and then attaches to this device.

    """
    self.attached = False

    # Call the External Storage's attach script,
    # to attach an existing Volume to a block device under /dev
    self.dev_path = _ExtStorageAction(constants.ES_ACTION_ATTACH,
                                      self.unique_id, self.ext_params)

    try:
      st = os.stat(self.dev_path)
    except OSError, err:
      logging.error("Error stat()'ing %s: %s", self.dev_path, str(err))
      return False

    if not stat.S_ISBLK(st.st_mode):
      logging.error("%s is not a block device", self.dev_path)
      return False

    self.major = os.major(st.st_rdev)
    self.minor = os.minor(st.st_rdev)
    self.attached = True

    return True

  def Assemble(self):
    """Assemble the device.

    """
    pass

  def Shutdown(self):
    """Shutdown the device.

    """
    if not self.minor and not self.Attach():
      # The extstorage device doesn't exist.
      return

    # Call the External Storage's detach script,
    # to detach an existing Volume from its block device under /dev
    _ExtStorageAction(constants.ES_ACTION_DETACH, self.unique_id,
                      self.ext_params)

    self.minor = None
    self.dev_path = None

  def Open(self, force=False):
    """Make the device ready for I/O.

    """
    pass

  def Close(self):
    """Notifies that the device will no longer be used for I/O.

    """
    pass

  def Grow(self, amount, dryrun, backingstore):
    """Grow the Volume.

    @type amount: integer
    @param amount: the amount (in mebibytes) to grow with
    @type dryrun: boolean
    @param dryrun: whether to execute the operation in simulation mode
        only, without actually increasing the size

    """
    if not backingstore:
      return
    if not self.Attach():
      base.ThrowError("Can't attach to extstorage device during Grow()")

    if dryrun:
      # we do not support dry runs of resize operations for now.
      return

    new_size = self.size + amount

    # Call the External Storage's grow script,
    # to grow an existing Volume inside the External Storage
    _ExtStorageAction(constants.ES_ACTION_GROW, self.unique_id,
                      self.ext_params, str(self.size), grow=str(new_size))

  def SetInfo(self, text):
    """Update metadata with info text.

    """
    # Replace invalid characters
    text = re.sub("^[^A-Za-z0-9_+.]", "_", text)
    text = re.sub("[^-A-Za-z0-9_+.]", "_", text)

    # Only up to 128 characters are allowed
    text = text[:128]

    # Call the External Storage's setinfo script,
    # to set metadata for an existing Volume inside the External Storage
    _ExtStorageAction(constants.ES_ACTION_SETINFO, self.unique_id,
                      self.ext_params, metadata=text)


def _ExtStorageAction(action, unique_id, ext_params,
                      size=None, grow=None, metadata=None):
  """Take an External Storage action.

  Take an External Storage action concerning or affecting
  a specific Volume inside the External Storage.

  @type action: string
  @param action: which action to perform. One of:
                 create / remove / grow / attach / detach
  @type unique_id: tuple (driver, vol_name)
  @param unique_id: a tuple containing the type of ExtStorage (driver)
                    and the Volume name
  @type ext_params: dict
  @param ext_params: ExtStorage parameters
  @type size: integer
  @param size: the size of the Volume in mebibytes
  @type grow: integer
  @param grow: the new size in mebibytes (after grow)
  @type metadata: string
  @param metadata: metadata info of the Volume, for use by the provider
  @rtype: None or a block device path (during attach)

  """
  driver, vol_name = unique_id

  # Create an External Storage instance of type `driver'
  status, inst_es = ExtStorageFromDisk(driver)
  if not status:
    base.ThrowError("%s" % inst_es)

  # Create the basic environment for the driver's scripts
  create_env = _ExtStorageEnvironment(unique_id, ext_params, size,
                                      grow, metadata)

  # Do not use log file for action `attach' as we need
  # to get the output from RunResult
  # TODO: find a way to have a log file for attach too
  logfile = None
  if action is not constants.ES_ACTION_ATTACH:
    logfile = _VolumeLogName(action, driver, vol_name)

  # Make sure the given action results in a valid script
  if action not in constants.ES_SCRIPTS:
    base.ThrowError("Action '%s' doesn't result in a valid ExtStorage script" %
                    action)

  # Find out which external script to run according to the given action
  script_name = action + "_script"
  script = getattr(inst_es, script_name)

    
1610
  # Run the external script
1611
  result = utils.RunCmd([script], env=create_env,
1612
                        cwd=inst_es.path, output=logfile,)
1613
  if result.failed:
1614
    logging.error("External storage's %s command '%s' returned"
1615
                  " error: %s, logfile: %s, output: %s",
1616
                  action, result.cmd, result.fail_reason,
1617
                  logfile, result.output)
1618

    
1619
    # If logfile is 'None' (during attach), it breaks TailFile
1620
    # TODO: have a log file for attach too
1621
    if action is not constants.ES_ACTION_ATTACH:
1622
      lines = [utils.SafeEncode(val)
1623
               for val in utils.TailFile(logfile, lines=20)]
1624
    else:
1625
      lines = result.output[-20:]
1626

    
1627
    base.ThrowError("External storage's %s script failed (%s), last"
1628
                    " lines of output:\n%s",
1629
                    action, result.fail_reason, "\n".join(lines))
1630

    
1631
  if action == constants.ES_ACTION_ATTACH:
1632
    return result.stdout
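
# For illustration only (provider and volume names are hypothetical):
# attaching a Volume "vol1" of an ExtStorage provider "myprovider" boils
# down to
#
#   dev_path = _ExtStorageAction(constants.ES_ACTION_ATTACH,
#                                ("myprovider", "vol1"), {})
#
# which runs the provider's attach script and returns the block device path
# printed on its stdout; all other actions return None and log to a file
# under pathutils.LOG_ES_DIR.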


def ExtStorageFromDisk(name, base_dir=None):
  """Create an ExtStorage instance from disk.

  This function will return an ExtStorage instance
  if the given name is a valid ExtStorage name.

  @type base_dir: string
  @keyword base_dir: Base directory containing ExtStorage installations.
                     Defaults to a search in all the ES_SEARCH_PATH dirs.
  @rtype: tuple
  @return: True and the ExtStorage instance if we find a valid one, or
      False and the diagnose message on error

  """
  if base_dir is None:
    es_base_dir = pathutils.ES_SEARCH_PATH
  else:
    es_base_dir = [base_dir]

  es_dir = utils.FindFile(name, es_base_dir, os.path.isdir)

  if es_dir is None:
    return False, ("Directory for External Storage Provider %s not"
                   " found in search path" % name)

  # ES Files dictionary, we will populate it with the absolute path
  # names; if the value is True, then it is a required file, otherwise
  # an optional one
  es_files = dict.fromkeys(constants.ES_SCRIPTS, True)

  es_files[constants.ES_PARAMETERS_FILE] = True

  for (filename, _) in es_files.items():
    es_files[filename] = utils.PathJoin(es_dir, filename)

    try:
      st = os.stat(es_files[filename])
    except EnvironmentError, err:
      return False, ("File '%s' under path '%s' is missing (%s)" %
                     (filename, es_dir, utils.ErrnoOrStr(err)))

    if not stat.S_ISREG(stat.S_IFMT(st.st_mode)):
      return False, ("File '%s' under path '%s' is not a regular file" %
                     (filename, es_dir))

    if filename in constants.ES_SCRIPTS:
      if stat.S_IMODE(st.st_mode) & stat.S_IXUSR != stat.S_IXUSR:
        return False, ("File '%s' under path '%s' is not executable" %
                       (filename, es_dir))

  parameters = []
  if constants.ES_PARAMETERS_FILE in es_files:
    parameters_file = es_files[constants.ES_PARAMETERS_FILE]
    try:
      parameters = utils.ReadFile(parameters_file).splitlines()
    except EnvironmentError, err:
      return False, ("Error while reading the EXT parameters file at %s: %s" %
                     (parameters_file, utils.ErrnoOrStr(err)))
    parameters = [v.split(None, 1) for v in parameters]

  es_obj = \
    objects.ExtStorage(name=name, path=es_dir,
                       create_script=es_files[constants.ES_SCRIPT_CREATE],
                       remove_script=es_files[constants.ES_SCRIPT_REMOVE],
                       grow_script=es_files[constants.ES_SCRIPT_GROW],
                       attach_script=es_files[constants.ES_SCRIPT_ATTACH],
                       detach_script=es_files[constants.ES_SCRIPT_DETACH],
                       setinfo_script=es_files[constants.ES_SCRIPT_SETINFO],
                       verify_script=es_files[constants.ES_SCRIPT_VERIFY],
                       supported_parameters=parameters)
  return True, es_obj
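
# For illustration only: looking up a hypothetical provider named
# "myprovider" installed under one of the pathutils.ES_SEARCH_PATH
# directories would go through
#
#   status, es = ExtStorageFromDisk("myprovider")
#   if not status:
#     base.ThrowError("%s" % es)   # es holds the diagnose message
#
# which is exactly how _ExtStorageAction above consumes this function.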


def _ExtStorageEnvironment(unique_id, ext_params,
                           size=None, grow=None, metadata=None):
  """Calculate the environment for an External Storage script.

  @type unique_id: tuple (driver, vol_name)
  @param unique_id: ExtStorage pool and name of the Volume
  @type ext_params: dict
  @param ext_params: the EXT parameters
  @type size: string
  @param size: size of the Volume (in mebibytes)
  @type grow: string
  @param grow: new size of Volume after grow (in mebibytes)
  @type metadata: string
  @param metadata: metadata info of the Volume
  @rtype: dict
  @return: dict of environment variables

  """
  vol_name = unique_id[1]

  result = {}
  result["VOL_NAME"] = vol_name

  # EXT params
  for pname, pvalue in ext_params.items():
    result["EXTP_%s" % pname.upper()] = str(pvalue)

  if size is not None:
    result["VOL_SIZE"] = size

  if grow is not None:
    result["VOL_NEW_SIZE"] = grow

  if metadata is not None:
    result["VOL_METADATA"] = metadata

  return result
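
# For illustration only (names hypothetical): for a Volume "vol1" of a
# provider configured with the ExtStorage parameter "pool=images",
#
#   _ExtStorageEnvironment(("myprovider", "vol1"), {"pool": "images"},
#                          size="1024")
#
# returns the environment
#
#   {"VOL_NAME": "vol1", "EXTP_POOL": "images", "VOL_SIZE": "1024"}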


def _VolumeLogName(kind, es_name, volume):
  """Compute the ExtStorage log filename for a given Volume and operation.

  @type kind: string
  @param kind: the operation type (e.g. create, remove etc.)
  @type es_name: string
  @param es_name: the ExtStorage name
  @type volume: string
  @param volume: the name of the Volume inside the External Storage

  """
  # Check if the extstorage log dir is a valid dir
  if not os.path.isdir(pathutils.LOG_ES_DIR):
    base.ThrowError("Cannot find log directory: %s", pathutils.LOG_ES_DIR)

  # TODO: Use tempfile.mkstemp to create unique filename
  basename = ("%s-%s-%s-%s.log" %
              (kind, es_name, volume, utils.TimestampForFilename()))
  return utils.PathJoin(pathutils.LOG_ES_DIR, basename)
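
# For illustration only: _VolumeLogName("create", "myprovider", "vol1")
# yields a path of the form
# <pathutils.LOG_ES_DIR>/create-myprovider-vol1-<timestamp>.log, with the
# timestamp produced by utils.TimestampForFilename().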


DEV_MAP = {
  constants.LD_LV: LogicalVolume,
  constants.LD_DRBD8: drbd.DRBD8Dev,
  constants.LD_BLOCKDEV: PersistentBlockDevice,
  constants.LD_RBD: RADOSBlockDevice,
  constants.LD_EXT: ExtStorageDevice,
  }

if constants.ENABLE_FILE_STORAGE or constants.ENABLE_SHARED_FILE_STORAGE:
  DEV_MAP[constants.LD_FILE] = FileStorage
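
# For illustration only: DEV_MAP is the dispatch table used by FindDevice,
# Assemble and Create below; e.g. a disk with dev_type == constants.LD_LV
# is instantiated roughly as
#
#   device = DEV_MAP[constants.LD_LV](disk.physical_id, children, disk.size,
#                                     disk.params)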


def _VerifyDiskType(dev_type):
  if dev_type not in DEV_MAP:
    raise errors.ProgrammerError("Invalid block device type '%s'" % dev_type)


def _VerifyDiskParams(disk):
  """Verifies if all disk parameters are set.

  """
  missing = set(constants.DISK_LD_DEFAULTS[disk.dev_type]) - set(disk.params)
  if missing:
    raise errors.ProgrammerError("Block device is missing disk parameters: %s" %
                                 missing)


def FindDevice(disk, children):
  """Search for an existing, assembled device.

  This will succeed only if the device exists and is assembled, but it
  does not do any actions in order to activate the device.

  @type disk: L{objects.Disk}
  @param disk: the disk object to find
  @type children: list of L{bdev.BlockDev}
  @param children: the list of block devices that are children of the device
                  represented by the disk parameter

  """
  _VerifyDiskType(disk.dev_type)
  device = DEV_MAP[disk.dev_type](disk.physical_id, children, disk.size,
                                  disk.params)
  if not device.attached:
    return None
  return device


def Assemble(disk, children):
  """Try to attach or assemble an existing device.

  This will attach or assemble the device, as needed, to bring it
  fully up. It must be safe to run on already-assembled devices.

  @type disk: L{objects.Disk}
  @param disk: the disk object to assemble
  @type children: list of L{bdev.BlockDev}
  @param children: the list of block devices that are children of the device
                  represented by the disk parameter

  """
  _VerifyDiskType(disk.dev_type)
  _VerifyDiskParams(disk)
  device = DEV_MAP[disk.dev_type](disk.physical_id, children, disk.size,
                                  disk.params)
  device.Assemble()
  return device


def Create(disk, children, excl_stor):
  """Create a device.

  @type disk: L{objects.Disk}
  @param disk: the disk object to create
  @type children: list of L{bdev.BlockDev}
  @param children: the list of block devices that are children of the device
                  represented by the disk parameter
  @type excl_stor: boolean
  @param excl_stor: Whether exclusive_storage is active
  @rtype: L{bdev.BlockDev}
  @return: the created device, or C{None} in case of an error

  """
  _VerifyDiskType(disk.dev_type)
  _VerifyDiskParams(disk)
  device = DEV_MAP[disk.dev_type].Create(disk.physical_id, children, disk.size,
                                         disk.spindles, disk.params, excl_stor)
  return device
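
# For illustration only: a typical sequence on a node creates the backing
# storage and then assembles it, along the lines of
#
#   dev = Create(disk, [], excl_stor=False)
#   dev = Assemble(disk, [])
#   dev.Open()
#
# where `disk' is an objects.Disk instance with dev_type, physical_id, size
# and params already set.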