#
#

# Copyright (C) 2006, 2007, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Block device abstraction"""

import re
import errno
import stat
import os
import logging
import math

from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import objects
from ganeti import compat
from ganeti import pathutils
from ganeti import serializer
from ganeti.storage import drbd
from ganeti.storage import base


class RbdShowmappedJsonError(Exception):
  """`rbd showmapped' JSON formatting error Exception class.
44

45
  """
46
  pass
47

    
48

    
49
def _CheckResult(result):
50
  """Throws an error if the given result is a failed one.
51

52
  @param result: result from RunCmd
53

54
  """
55
  if result.failed:
56
    base.ThrowError("Command: %s error: %s - %s",
57
                    result.cmd, result.fail_reason, result.output)
58

    
59

    
60
def _GetForbiddenFileStoragePaths():
61
  """Builds a list of path prefixes which shouldn't be used for file storage.
62

63
  @rtype: frozenset
64

65
  """
66
  paths = set([
67
    "/boot",
68
    "/dev",
69
    "/etc",
70
    "/home",
71
    "/proc",
72
    "/root",
73
    "/sys",
74
    ])
75

    
76
  for prefix in ["", "/usr", "/usr/local"]:
77
    paths.update(map(lambda s: "%s/%s" % (prefix, s),
78
                     ["bin", "lib", "lib32", "lib64", "sbin"]))
79

    
80
  return compat.UniqueFrozenset(map(os.path.normpath, paths))
81

    
82

    
83
def _ComputeWrongFileStoragePaths(paths,
84
                                  _forbidden=_GetForbiddenFileStoragePaths()):
85
  """Cross-checks a list of paths for prefixes considered bad.
86

87
  Some paths, e.g. "/bin", should not be used for file storage.
88

89
  @type paths: list
90
  @param paths: List of paths to be checked
91
  @rtype: list
92
  @return: Sorted list of paths for which the user should be warned
93

94
  """
95
  def _Check(path):
96
    return (not os.path.isabs(path) or
97
            path in _forbidden or
98
            filter(lambda p: utils.IsBelowDir(p, path), _forbidden))
99

    
100
  return utils.NiceSort(filter(_Check, map(os.path.normpath, paths)))
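
# Illustrative sketch, not part of the original module: shows how the check
# above classifies a few hypothetical paths ("/srv/ganeti/file-storage" is
# just a made-up acceptable location).
def _ExampleWrongFileStoragePaths():
  """Return the sample paths that _ComputeWrongFileStoragePaths would flag.

  "relative/path" is flagged for not being absolute, "/bin/storage" for lying
  below the forbidden prefix "/bin"; "/srv/ganeti/file-storage" is accepted.

  """
  sample = ["/srv/ganeti/file-storage", "/bin/storage", "relative/path"]
  return _ComputeWrongFileStoragePaths(sample)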
101

    
102

    
103
def ComputeWrongFileStoragePaths(_filename=pathutils.FILE_STORAGE_PATHS_FILE):
104
  """Returns a list of file storage paths whose prefix is considered bad.
105

106
  See L{_ComputeWrongFileStoragePaths}.
107

108
  """
109
  return _ComputeWrongFileStoragePaths(_LoadAllowedFileStoragePaths(_filename))
110

    
111

    
112
def _CheckFileStoragePath(path, allowed):
113
  """Checks if a path is in a list of allowed paths for file storage.
114

115
  @type path: string
116
  @param path: Path to check
117
  @type allowed: list
118
  @param allowed: List of allowed paths
119
  @raise errors.FileStoragePathError: If the path is not allowed
120

121
  """
122
  if not os.path.isabs(path):
123
    raise errors.FileStoragePathError("File storage path must be absolute,"
124
                                      " got '%s'" % path)
125

    
126
  for i in allowed:
127
    if not os.path.isabs(i):
128
      logging.info("Ignoring relative path '%s' for file storage", i)
129
      continue
130

    
131
    if utils.IsBelowDir(i, path):
132
      break
133
  else:
134
    raise errors.FileStoragePathError("Path '%s' is not acceptable for file"
135
                                      " storage" % path)
136
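
# Note (sketch with assumed example values, not from the original code): the
# loop above accepts a path only when it lies below one of the allowed
# directories, e.g.
#   _CheckFileStoragePath("/srv/ganeti/file-storage/inst1.disk0",
#                         ["/srv/ganeti/file-storage"])
# returns without error, while a path outside every allowed directory raises
# errors.FileStoragePathError.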

    
137

    
138
def _LoadAllowedFileStoragePaths(filename):
139
  """Loads file containing allowed file storage paths.
140

141
  @rtype: list
142
  @return: List of allowed paths (can be an empty list)
143

144
  """
145
  try:
146
    contents = utils.ReadFile(filename)
147
  except EnvironmentError:
148
    return []
149
  else:
150
    return utils.FilterEmptyLinesAndComments(contents)
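
# Example contents for the paths file (illustrative only): one allowed
# directory per line; blank lines and "#" comments are stripped by the
# filter above, and a missing or unreadable file simply yields an empty list.
#
#   # allowed file storage directories
#   /srv/ganeti/file-storage
#   /srv/ganeti/shared-file-storage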
151

    
152

    
153
def CheckFileStoragePath(path, _filename=pathutils.FILE_STORAGE_PATHS_FILE):
154
  """Checks if a path is allowed for file storage.
155

156
  @type path: string
157
  @param path: Path to check
158
  @raise errors.FileStoragePathError: If the path is not allowed
159

160
  """
161
  allowed = _LoadAllowedFileStoragePaths(_filename)
162

    
163
  if _ComputeWrongFileStoragePaths([path]):
164
    raise errors.FileStoragePathError("Path '%s' uses a forbidden prefix" %
165
                                      path)
166

    
167
  _CheckFileStoragePath(path, allowed)
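
# Usage sketch (hypothetical helper, not part of the original module):
# callers that prefer a boolean answer can wrap the exception-based API above.
def _ExampleIsFileStoragePathAcceptable(path):
  """Return True if CheckFileStoragePath accepts C{path}, else False."""
  try:
    CheckFileStoragePath(path)
  except errors.FileStoragePathError:
    return False
  return True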
168

    
169

    
170
class LogicalVolume(base.BlockDev):
171
  """Logical Volume block device.
172

173
  """
174
  _VALID_NAME_RE = re.compile("^[a-zA-Z0-9+_.-]*$")
175
  _INVALID_NAMES = compat.UniqueFrozenset([".", "..", "snapshot", "pvmove"])
176
  _INVALID_SUBSTRINGS = compat.UniqueFrozenset(["_mlog", "_mimage"])
177

    
178
  def __init__(self, unique_id, children, size, params):
179
    """Attaches to a LV device.
180

181
    The unique_id is a tuple (vg_name, lv_name)
182

183
    """
184
    super(LogicalVolume, self).__init__(unique_id, children, size, params)
185
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
186
      raise ValueError("Invalid configuration data %s" % str(unique_id))
187
    self._vg_name, self._lv_name = unique_id
188
    self._ValidateName(self._vg_name)
189
    self._ValidateName(self._lv_name)
190
    self.dev_path = utils.PathJoin("/dev", self._vg_name, self._lv_name)
191
    self._degraded = True
192
    self.major = self.minor = self.pe_size = self.stripe_count = None
193
    self.Attach()
194

    
195
  @staticmethod
196
  def _GetStdPvSize(pvs_info):
    """Return the standard PV size (used with exclusive storage).
198

199
    @param pvs_info: list of objects.LvmPvInfo, cannot be empty
200
    @rtype: float
201
    @return: size in MiB
202

203
    """
204
    assert len(pvs_info) > 0
205
    smallest = min([pv.size for pv in pvs_info])
206
    return smallest / (1 + constants.PART_MARGIN + constants.PART_RESERVED)
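
  # Worked example (illustrative figures; assumes PART_MARGIN = 0.01 and
  # PART_RESERVED = 0.02): with PVs of 10240 MiB and 10000 MiB the smallest
  # one is used, giving 10000 / 1.03, i.e. roughly 9708.7 MiB per standard PV.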
207

    
208
  @staticmethod
209
  def _ComputeNumPvs(size, pvs_info):
210
    """Compute the number of PVs needed for an LV (with exclusive storage).
211

212
    @type size: float
213
    @param size: LV size in MiB
214
    @param pvs_info: list of objects.LvmPvInfo, cannot be empty
215
    @rtype: integer
216
    @return: number of PVs needed
217
    """
218
    assert len(pvs_info) > 0
219
    pv_size = float(LogicalVolume._GetStdPvSize(pvs_info))
220
    return int(math.ceil(float(size) / pv_size))
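
  # Example (illustrative): with a standard PV size of roughly 970 MiB, a
  # 2048 MiB LV needs ceil(2048 / 970) = 3 PVs.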
221

    
222
  @staticmethod
223
  def _GetEmptyPvNames(pvs_info, max_pvs=None):
224
    """Return a list of empty PVs, by name.
225

226
    """
227
    empty_pvs = filter(objects.LvmPvInfo.IsEmpty, pvs_info)
228
    if max_pvs is not None:
229
      empty_pvs = empty_pvs[:max_pvs]
230
    return map((lambda pv: pv.name), empty_pvs)
231

    
232
  @classmethod
233
  def Create(cls, unique_id, children, size, spindles, params, excl_stor):
234
    """Create a new logical volume.
235

236
    """
237
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
238
      raise errors.ProgrammerError("Invalid configuration data %s" %
239
                                   str(unique_id))
240
    vg_name, lv_name = unique_id
241
    cls._ValidateName(vg_name)
242
    cls._ValidateName(lv_name)
243
    pvs_info = cls.GetPVInfo([vg_name])
244
    if not pvs_info:
245
      if excl_stor:
246
        msg = "No (empty) PVs found"
247
      else:
248
        msg = "Can't compute PV info for vg %s" % vg_name
249
      base.ThrowError(msg)
250
    pvs_info.sort(key=(lambda pv: pv.free), reverse=True)
251

    
252
    pvlist = [pv.name for pv in pvs_info]
253
    if compat.any(":" in v for v in pvlist):
254
      base.ThrowError("Some of your PVs have the invalid character ':' in their"
255
                      " name, this is not supported - please filter them out"
256
                      " in lvm.conf using either 'filter' or 'preferred_names'")
257

    
258
    current_pvs = len(pvlist)
259
    desired_stripes = params[constants.LDP_STRIPES]
260
    stripes = min(current_pvs, desired_stripes)
261

    
262
    if excl_stor:
263
      (err_msgs, _) = utils.LvmExclusiveCheckNodePvs(pvs_info)
264
      if err_msgs:
265
        for m in err_msgs:
266
          logging.warning(m)
267
      req_pvs = cls._ComputeNumPvs(size, pvs_info)
268
      if spindles:
269
        if spindles < req_pvs:
270
          base.ThrowError("Requested number of spindles (%s) is not enough for"
271
                          " a disk of %d MB (at least %d spindles needed)",
272
                          spindles, size, req_pvs)
273
        else:
274
          req_pvs = spindles
275
      pvlist = cls._GetEmptyPvNames(pvs_info, req_pvs)
276
      current_pvs = len(pvlist)
277
      if current_pvs < req_pvs:
278
        base.ThrowError("Not enough empty PVs (spindles) to create a disk of %d"
279
                        " MB: %d available, %d needed",
280
                        size, current_pvs, req_pvs)
281
      assert current_pvs == len(pvlist)
282
      if stripes > current_pvs:
283
        # No warning issued for this, as it's no surprise
284
        stripes = current_pvs
285

    
286
    else:
287
      if stripes < desired_stripes:
288
        logging.warning("Could not use %d stripes for VG %s, as only %d PVs are"
289
                        " available.", desired_stripes, vg_name, current_pvs)
290
      free_size = sum([pv.free for pv in pvs_info])
291
      # The size constraint should have been checked from the master before
292
      # calling the create function.
293
      if free_size < size:
294
        base.ThrowError("Not enough free space: required %s,"
295
                        " available %s", size, free_size)
296

    
297
    # If the free space is not well distributed, we won't be able to
298
    # create an optimally-striped volume; in that case, we want to try
299
    # with N, N-1, ..., 2, and finally 1 (non-striped) number of
300
    # stripes
301
    cmd = ["lvcreate", "-L%dm" % size, "-n%s" % lv_name]
302
    for stripes_arg in range(stripes, 0, -1):
303
      result = utils.RunCmd(cmd + ["-i%d" % stripes_arg] + [vg_name] + pvlist)
304
      if not result.failed:
305
        break
306
    if result.failed:
307
      base.ThrowError("LV create failed (%s): %s",
308
                      result.fail_reason, result.output)
309
    return LogicalVolume(unique_id, children, size, params)
310

    
311
  @staticmethod
312
  def _GetVolumeInfo(lvm_cmd, fields):
    """Returns LVM volume information using lvm_cmd.
314

315
    @param lvm_cmd: Should be one of "pvs", "vgs" or "lvs"
316
    @param fields: Fields to return
317
    @return: A list of dicts each with the parsed fields
318

319
    """
320
    if not fields:
321
      raise errors.ProgrammerError("No fields specified")
322

    
323
    sep = "|"
324
    cmd = [lvm_cmd, "--noheadings", "--nosuffix", "--units=m", "--unbuffered",
325
           "--separator=%s" % sep, "-o%s" % ",".join(fields)]
326

    
327
    result = utils.RunCmd(cmd)
328
    if result.failed:
329
      raise errors.CommandError("Can't get the volume information: %s - %s" %
330
                                (result.fail_reason, result.output))
331

    
332
    data = []
333
    for line in result.stdout.splitlines():
334
      splitted_fields = line.strip().split(sep)
335

    
336
      if len(fields) != len(splitted_fields):
337
        raise errors.CommandError("Can't parse %s output: line '%s'" %
338
                                  (lvm_cmd, line))
339

    
340
      data.append(splitted_fields)
341

    
342
    return data
343

    
344
  @classmethod
345
  def GetPVInfo(cls, vg_names, filter_allocatable=True, include_lvs=False):
346
    """Get the free space info for PVs in a volume group.
347

348
    @param vg_names: list of volume group names, if empty all will be returned
349
    @param filter_allocatable: whether to skip over unallocatable PVs
350
    @param include_lvs: whether to include a list of LVs hosted on each PV
351

352
    @rtype: list
353
    @return: list of objects.LvmPvInfo objects
354

355
    """
356
    # We request "lv_name" field only if we care about LVs, so we don't get
357
    # a long list of entries with many duplicates unless we really have to.
358
    # The duplicate "pv_name" field will be ignored.
359
    if include_lvs:
360
      lvfield = "lv_name"
361
    else:
362
      lvfield = "pv_name"
363
    try:
364
      info = cls._GetVolumeInfo("pvs", ["pv_name", "vg_name", "pv_free",
365
                                        "pv_attr", "pv_size", lvfield])
366
    except errors.GenericError, err:
367
      logging.error("Can't get PV information: %s", err)
368
      return None
369

    
370
    # When asked for LVs, "pvs" may return multiple entries for the same PV-LV
371
    # pair. We sort entries by PV name and then LV name, so it's easy to weed
372
    # out duplicates.
373
    if include_lvs:
374
      info.sort(key=(lambda i: (i[0], i[5])))
375
    data = []
376
    lastpvi = None
377
    for (pv_name, vg_name, pv_free, pv_attr, pv_size, lv_name) in info:
378
      # (possibly) skip over pvs which are not allocatable
379
      if filter_allocatable and pv_attr[0] != "a":
380
        continue
381
      # (possibly) skip over pvs which are not in the right volume group(s)
382
      if vg_names and vg_name not in vg_names:
383
        continue
384
      # Beware of duplicates (check before inserting)
385
      if lastpvi and lastpvi.name == pv_name:
386
        if include_lvs and lv_name:
387
          if not lastpvi.lv_list or lastpvi.lv_list[-1] != lv_name:
388
            lastpvi.lv_list.append(lv_name)
389
      else:
390
        if include_lvs and lv_name:
391
          lvl = [lv_name]
392
        else:
393
          lvl = []
394
        lastpvi = objects.LvmPvInfo(name=pv_name, vg_name=vg_name,
395
                                    size=float(pv_size), free=float(pv_free),
396
                                    attributes=pv_attr, lv_list=lvl)
397
        data.append(lastpvi)
398

    
399
    return data
400

    
401
  @classmethod
402
  def _GetExclusiveStorageVgFree(cls, vg_name):
403
    """Return the free disk space in the given VG, in exclusive storage mode.
404

405
    @type vg_name: string
406
    @param vg_name: VG name
407
    @rtype: float
408
    @return: free space in MiB
409
    """
410
    pvs_info = cls.GetPVInfo([vg_name])
411
    if not pvs_info:
412
      return 0.0
413
    pv_size = cls._GetStdPvSize(pvs_info)
414
    num_pvs = len(cls._GetEmptyPvNames(pvs_info))
415
    return pv_size * num_pvs
416

    
417
  @classmethod
418
  def GetVGInfo(cls, vg_names, excl_stor, filter_readonly=True):
419
    """Get the free space info for specific VGs.
420

421
    @param vg_names: list of volume group names, if empty all will be returned
422
    @param excl_stor: whether exclusive_storage is enabled
423
    @param filter_readonly: whether to skip over readonly VGs
424

425
    @rtype: list
426
    @return: list of tuples (free_space, total_size, name) with free_space in
427
             MiB
428

429
    """
430
    try:
431
      info = cls._GetVolumeInfo("vgs", ["vg_name", "vg_free", "vg_attr",
432
                                        "vg_size"])
433
    except errors.GenericError, err:
434
      logging.error("Can't get VG information: %s", err)
435
      return None
436

    
437
    data = []
438
    for vg_name, vg_free, vg_attr, vg_size in info:
439
      # (possibly) skip over vgs which are not writable
440
      if filter_readonly and vg_attr[0] == "r":
441
        continue
442
      # (possibly) skip over vgs which are not in the right volume group(s)
443
      if vg_names and vg_name not in vg_names:
444
        continue
445
      # Exclusive storage needs a different concept of free space
446
      if excl_stor:
447
        es_free = cls._GetExclusiveStorageVgFree(vg_name)
448
        assert es_free <= vg_free
449
        vg_free = es_free
450
      data.append((float(vg_free), float(vg_size), vg_name))
451

    
452
    return data
453

    
454
  @classmethod
455
  def _ValidateName(cls, name):
456
    """Validates that a given name is valid as VG or LV name.
457

458
    The list of valid characters and restricted names is taken out of
459
    the lvm(8) manpage, with the simplification that we enforce both
460
    VG and LV restrictions on the names.
461

462
    """
463
    if (not cls._VALID_NAME_RE.match(name) or
464
        name in cls._INVALID_NAMES or
465
        compat.any(substring in name for substring in cls._INVALID_SUBSTRINGS)):
466
      base.ThrowError("Invalid LVM name '%s'", name)
467

    
468
  def Remove(self):
469
    """Remove this logical volume.
470

471
    """
472
    if not self.minor and not self.Attach():
473
      # the LV does not exist
474
      return
475
    result = utils.RunCmd(["lvremove", "-f", "%s/%s" %
476
                           (self._vg_name, self._lv_name)])
477
    if result.failed:
478
      base.ThrowError("Can't lvremove: %s - %s",
479
                      result.fail_reason, result.output)
480

    
481
  def Rename(self, new_id):
482
    """Rename this logical volume.
483

484
    """
485
    if not isinstance(new_id, (tuple, list)) or len(new_id) != 2:
486
      raise errors.ProgrammerError("Invalid new logical id '%s'" % new_id)
487
    new_vg, new_name = new_id
488
    if new_vg != self._vg_name:
489
      raise errors.ProgrammerError("Can't move a logical volume across"
                                   " volume groups (from %s to %s)" %
491
                                   (self._vg_name, new_vg))
492
    result = utils.RunCmd(["lvrename", new_vg, self._lv_name, new_name])
493
    if result.failed:
494
      base.ThrowError("Failed to rename the logical volume: %s", result.output)
495
    self._lv_name = new_name
496
    self.dev_path = utils.PathJoin("/dev", self._vg_name, self._lv_name)
497

    
498
  def Attach(self):
499
    """Attach to an existing LV.
500

501
    This method will try to see if an existing and active LV exists
502
    which matches our name. If so, its major/minor will be
503
    recorded.
504

505
    """
506
    self.attached = False
507
    result = utils.RunCmd(["lvs", "--noheadings", "--separator=,",
508
                           "--units=k", "--nosuffix",
509
                           "-olv_attr,lv_kernel_major,lv_kernel_minor,"
510
                           "vg_extent_size,stripes", self.dev_path])
511
    if result.failed:
512
      logging.error("Can't find LV %s: %s, %s",
513
                    self.dev_path, result.fail_reason, result.output)
514
      return False
515
    # the output can (and will) have multiple lines for multi-segment
516
    # LVs, as the 'stripes' parameter is a segment one, so we take
517
    # only the last entry, which is the one we're interested in; note
518
    # that with LVM2 anyway the 'stripes' value must be constant
519
    # across segments, so this is a no-op actually
520
    out = result.stdout.splitlines()
521
    if not out: # totally empty result? splitlines() returns at least
522
                # one line for any non-empty string
523
      logging.error("Can't parse LVS output, no lines? Got '%s'", str(out))
524
      return False
525
    out = out[-1].strip().rstrip(",")
526
    out = out.split(",")
527
    if len(out) != 5:
528
      logging.error("Can't parse LVS output, len(%s) != 5", str(out))
529
      return False
530

    
531
    status, major, minor, pe_size, stripes = out
532
    if len(status) < 6:
533
      logging.error("lvs lv_attr is not at least 6 characters (%s)", status)
534
      return False
535

    
536
    try:
537
      major = int(major)
538
      minor = int(minor)
539
    except (TypeError, ValueError), err:
540
      logging.error("lvs major/minor cannot be parsed: %s", str(err))
541

    
542
    try:
543
      pe_size = int(float(pe_size))
544
    except (TypeError, ValueError), err:
545
      logging.error("Can't parse vg extent size: %s", err)
546
      return False
547

    
548
    try:
549
      stripes = int(stripes)
550
    except (TypeError, ValueError), err:
551
      logging.error("Can't parse the number of stripes: %s", err)
552
      return False
553

    
554
    self.major = major
555
    self.minor = minor
556
    self.pe_size = pe_size
557
    self.stripe_count = stripes
558
    self._degraded = status[0] == "v" # virtual volume, i.e. it has no
                                      # backing storage
560
    self.attached = True
561
    return True
562

    
563
  def Assemble(self):
564
    """Assemble the device.
565

566
    We always run `lvchange -ay` on the LV to ensure it's active before
567
    use, as there were cases when xenvg was not active after boot
568
    (also possibly after disk issues).
569

570
    """
571
    result = utils.RunCmd(["lvchange", "-ay", self.dev_path])
572
    if result.failed:
573
      base.ThrowError("Can't activate lv %s: %s", self.dev_path, result.output)
574

    
575
  def Shutdown(self):
576
    """Shutdown the device.
577

578
    This is a no-op for the LV device type, as we don't deactivate the
579
    volumes on shutdown.
580

581
    """
582
    pass
583

    
584
  def GetSyncStatus(self):
585
    """Returns the sync status of the device.
586

587
    If this device is a mirroring device, this function returns the
588
    status of the mirror.
589

590
    For logical volumes, sync_percent and estimated_time are always
591
    None (no recovery in progress, as we don't handle the mirrored LV
592
    case). The is_degraded parameter is the inverse of the ldisk
593
    parameter.
594

595
    For the ldisk parameter, we check if the logical volume has the
596
    'virtual' type, which means it's not backed by existing storage
597
    anymore (read from it return I/O error). This happens after a
598
    physical disk failure and subsequent 'vgreduce --removemissing' on
599
    the volume group.
600

601
    The status was already read in Attach, so we just return it.
602

603
    @rtype: objects.BlockDevStatus
604

605
    """
606
    if self._degraded:
607
      ldisk_status = constants.LDS_FAULTY
608
    else:
609
      ldisk_status = constants.LDS_OKAY
610

    
611
    return objects.BlockDevStatus(dev_path=self.dev_path,
612
                                  major=self.major,
613
                                  minor=self.minor,
614
                                  sync_percent=None,
615
                                  estimated_time=None,
616
                                  is_degraded=self._degraded,
617
                                  ldisk_status=ldisk_status)
618

    
619
  def Open(self, force=False):
620
    """Make the device ready for I/O.
621

622
    This is a no-op for the LV device type.
623

624
    """
625
    pass
626

    
627
  def Close(self):
628
    """Notifies that the device will no longer be used for I/O.
629

630
    This is a no-op for the LV device type.
631

632
    """
633
    pass
634

    
635
  def Snapshot(self, size):
636
    """Create a snapshot copy of an lvm block device.
637

638
    @return: tuple (vg, lv)
639

640
    """
641
    snap_name = self._lv_name + ".snap"
642

    
643
    # remove existing snapshot if found
644
    snap = LogicalVolume((self._vg_name, snap_name), None, size, self.params)
645
    base.IgnoreError(snap.Remove)
646

    
647
    vg_info = self.GetVGInfo([self._vg_name], False)
648
    if not vg_info:
649
      base.ThrowError("Can't compute VG info for vg %s", self._vg_name)
650
    free_size, _, _ = vg_info[0]
651
    if free_size < size:
652
      base.ThrowError("Not enough free space: required %s,"
653
                      " available %s", size, free_size)
654

    
655
    _CheckResult(utils.RunCmd(["lvcreate", "-L%dm" % size, "-s",
656
                               "-n%s" % snap_name, self.dev_path]))
657

    
658
    return (self._vg_name, snap_name)
659

    
660
  def _RemoveOldInfo(self):
661
    """Try to remove old tags from the lv.
662

663
    """
664
    result = utils.RunCmd(["lvs", "-o", "tags", "--noheadings", "--nosuffix",
665
                           self.dev_path])
666
    _CheckResult(result)
667

    
668
    raw_tags = result.stdout.strip()
669
    if raw_tags:
670
      for tag in raw_tags.split(","):
671
        _CheckResult(utils.RunCmd(["lvchange", "--deltag",
672
                                   tag.strip(), self.dev_path]))
673

    
674
  def SetInfo(self, text):
675
    """Update metadata with info text.
676

677
    """
678
    base.BlockDev.SetInfo(self, text)
679

    
680
    self._RemoveOldInfo()
681

    
682
    # Replace invalid characters
683
    text = re.sub("^[^A-Za-z0-9_+.]", "_", text)
684
    text = re.sub("[^-A-Za-z0-9_+.]", "_", text)
685

    
686
    # Only up to 128 characters are allowed
687
    text = text[:128]
688

    
689
    _CheckResult(utils.RunCmd(["lvchange", "--addtag", text, self.dev_path]))
690

    
691
  def Grow(self, amount, dryrun, backingstore):
692
    """Grow the logical volume.
693

694
    """
695
    if not backingstore:
696
      return
697
    if self.pe_size is None or self.stripe_count is None:
698
      if not self.Attach():
699
        base.ThrowError("Can't attach to LV during Grow()")
700
    full_stripe_size = self.pe_size * self.stripe_count
701
    # pe_size is in KB
702
    amount *= 1024
703
    rest = amount % full_stripe_size
704
    if rest != 0:
705
      amount += full_stripe_size - rest
706
    cmd = ["lvextend", "-L", "+%dk" % amount]
707
    if dryrun:
708
      cmd.append("--test")
709
    # we try multiple algorithms since the 'best' ones might not have
710
    # space available in the right place, but later ones might (since
711
    # they have less constraints); also note that only recent LVM
712
    # supports 'cling'
713
    for alloc_policy in "contiguous", "cling", "normal":
714
      result = utils.RunCmd(cmd + ["--alloc", alloc_policy, self.dev_path])
715
      if not result.failed:
716
        return
717
    base.ThrowError("Can't grow LV %s: %s", self.dev_path, result.output)
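
# Lifecycle sketch (hypothetical names and sizes, not part of the original
# module): how a LogicalVolume is typically created and then resized. The
# volume group "xenvg" is only an example and params must provide at least
# the LDP_STRIPES parameter.
def _ExampleLogicalVolumeLifecycle(params):
  """Create a 1 GiB LV on "xenvg", activate it and grow it by 512 MiB."""
  lv = LogicalVolume.Create(("xenvg", "example-disk.data"), [], 1024,
                            None, params, False)
  lv.Assemble()              # runs "lvchange -ay" to make sure it is active
  lv.Grow(512, False, True)  # extend the backing storage by 512 MiB
  return lv.GetSyncStatus()  # LVs never report sync progress, only degradation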
718

    
719

    
720
class FileStorage(base.BlockDev):
721
  """File device.
722

723
  This class represents a file storage backend device.
724

725
  The unique_id for the file device is a (file_driver, file_path) tuple.
726

727
  """
728
  def __init__(self, unique_id, children, size, params):
    """Initializes a file device backend.
730

731
    """
732
    if children:
733
      raise errors.BlockDeviceError("Invalid setup for file device")
734
    super(FileStorage, self).__init__(unique_id, children, size, params)
735
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
736
      raise ValueError("Invalid configuration data %s" % str(unique_id))
737
    self.driver = unique_id[0]
738
    self.dev_path = unique_id[1]
739

    
740
    CheckFileStoragePath(self.dev_path)
741

    
742
    self.Attach()
743

    
744
  def Assemble(self):
745
    """Assemble the device.
746

747
    Checks whether the file device exists, raises BlockDeviceError otherwise.
748

749
    """
750
    if not os.path.exists(self.dev_path):
751
      base.ThrowError("File device '%s' does not exist" % self.dev_path)
752

    
753
  def Shutdown(self):
754
    """Shutdown the device.
755

756
    This is a no-op for the file type, as we don't deactivate
757
    the file on shutdown.
758

759
    """
760
    pass
761

    
762
  def Open(self, force=False):
763
    """Make the device ready for I/O.
764

765
    This is a no-op for the file type.
766

767
    """
768
    pass
769

    
770
  def Close(self):
771
    """Notifies that the device will no longer be used for I/O.
772

773
    This is a no-op for the file type.
774

775
    """
776
    pass
777

    
778
  def Remove(self):
779
    """Remove the file backing the block device.
780

781
    @rtype: boolean
782
    @return: True if the removal was successful
783

784
    """
785
    try:
786
      os.remove(self.dev_path)
787
    except OSError, err:
788
      if err.errno != errno.ENOENT:
789
        base.ThrowError("Can't remove file '%s': %s", self.dev_path, err)
790

    
791
  def Rename(self, new_id):
792
    """Renames the file.
793

794
    """
795
    # TODO: implement rename for file-based storage
796
    base.ThrowError("Rename is not supported for file-based storage")
797

    
798
  def Grow(self, amount, dryrun, backingstore):
799
    """Grow the file
800

801
    @param amount: the amount (in mebibytes) to grow with
802

803
    """
804
    if not backingstore:
805
      return
806
    # Check that the file exists
807
    self.Assemble()
808
    current_size = self.GetActualSize()
809
    new_size = current_size + amount * 1024 * 1024
810
    assert new_size > current_size, "Cannot Grow with a negative amount"
811
    # We can't really simulate the growth
812
    if dryrun:
813
      return
814
    try:
815
      f = open(self.dev_path, "a+")
816
      f.truncate(new_size)
817
      f.close()
818
    except EnvironmentError, err:
819
      base.ThrowError("Error in file growth: %s", str(err))
820

    
821
  def Attach(self):
822
    """Attach to an existing file.
823

824
    Check if this file already exists.
825

826
    @rtype: boolean
827
    @return: True if file exists
828

829
    """
830
    self.attached = os.path.exists(self.dev_path)
831
    return self.attached
832

    
833
  def GetActualSize(self):
834
    """Return the actual disk size.
835

836
    @note: the device needs to be active when this is called
837

838
    """
839
    assert self.attached, "BlockDevice not attached in GetActualSize()"
840
    try:
841
      st = os.stat(self.dev_path)
842
      return st.st_size
843
    except OSError, err:
844
      base.ThrowError("Can't stat %s: %s", self.dev_path, err)
845

    
846
  @classmethod
847
  def Create(cls, unique_id, children, size, spindles, params, excl_stor):
848
    """Create a new file.
849

850
    @param size: the size of file in MiB
851

852
    @rtype: L{bdev.FileStorage}
853
    @return: an instance of FileStorage
854

855
    """
856
    if excl_stor:
857
      raise errors.ProgrammerError("FileStorage device requested with"
858
                                   " exclusive_storage")
859
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
860
      raise ValueError("Invalid configuration data %s" % str(unique_id))
861

    
862
    dev_path = unique_id[1]
863

    
864
    CheckFileStoragePath(dev_path)
865

    
866
    try:
867
      fd = os.open(dev_path, os.O_RDWR | os.O_CREAT | os.O_EXCL)
868
      f = os.fdopen(fd, "w")
869
      f.truncate(size * 1024 * 1024)
870
      f.close()
871
    except EnvironmentError, err:
872
      if err.errno == errno.EEXIST:
873
        base.ThrowError("File already exists: %s", dev_path)
      base.ThrowError("Error in file creation: %s", str(err))
875

    
876
    return FileStorage(unique_id, children, size, params)
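
# Usage sketch (hypothetical path and driver name, not part of the original
# module): the path must live below one of the allowed file storage
# directories or CheckFileStoragePath will reject it.
def _ExampleFileStorageCreate(params):
  """Create a 512 MiB file-backed disk and return its size on disk."""
  unique_id = ("loop", "/srv/ganeti/file-storage/example.disk0")
  disk = FileStorage.Create(unique_id, [], 512, None, params, False)
  return disk.GetActualSize()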
877

    
878

    
879
class PersistentBlockDevice(base.BlockDev):
  """A block device with a persistent node
881

882
  May be either directly attached, or exposed through DM (e.g. dm-multipath).
883
  udev helpers are probably required to give persistent, human-friendly
884
  names.
885

886
  For the time being, pathnames are required to lie under /dev.
887

888
  """
889
  def __init__(self, unique_id, children, size, params):
890
    """Attaches to a static block device.
891

892
    The unique_id is a path under /dev.
893

894
    """
895
    super(PersistentBlockDevice, self).__init__(unique_id, children, size,
896
                                                params)
897
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
898
      raise ValueError("Invalid configuration data %s" % str(unique_id))
899
    self.dev_path = unique_id[1]
900
    if not os.path.realpath(self.dev_path).startswith("/dev/"):
901
      raise ValueError("Full path '%s' lies outside /dev" %
902
                              os.path.realpath(self.dev_path))
903
    # TODO: this is just a safety guard checking that we only deal with devices
904
    # we know how to handle. In the future this will be integrated with
905
    # external storage backends and possible values will probably be collected
906
    # from the cluster configuration.
907
    if unique_id[0] != constants.BLOCKDEV_DRIVER_MANUAL:
908
      raise ValueError("Got persistent block device of invalid type: %s" %
909
                       unique_id[0])
910

    
911
    self.major = self.minor = None
912
    self.Attach()
913

    
914
  @classmethod
915
  def Create(cls, unique_id, children, size, spindles, params, excl_stor):
916
    """Create a new device
917

918
    This is a noop, we only return a PersistentBlockDevice instance
919

920
    """
921
    if excl_stor:
922
      raise errors.ProgrammerError("Persistent block device requested with"
923
                                   " exclusive_storage")
924
    return PersistentBlockDevice(unique_id, children, 0, params)
925

    
926
  def Remove(self):
927
    """Remove a device
928

929
    This is a noop
930

931
    """
932
    pass
933

    
934
  def Rename(self, new_id):
935
    """Rename this device.
936

937
    """
938
    base.ThrowError("Rename is not supported for PersistentBlockDev storage")
939

    
940
  def Attach(self):
941
    """Attach to an existing block device.
942

943

944
    """
945
    self.attached = False
946
    try:
947
      st = os.stat(self.dev_path)
948
    except OSError, err:
949
      logging.error("Error stat()'ing %s: %s", self.dev_path, str(err))
950
      return False
951

    
952
    if not stat.S_ISBLK(st.st_mode):
953
      logging.error("%s is not a block device", self.dev_path)
954
      return False
955

    
956
    self.major = os.major(st.st_rdev)
957
    self.minor = os.minor(st.st_rdev)
958
    self.attached = True
959

    
960
    return True
961

    
962
  def Assemble(self):
963
    """Assemble the device.
964

965
    """
966
    pass
967

    
968
  def Shutdown(self):
969
    """Shutdown the device.
970

971
    """
972
    pass
973

    
974
  def Open(self, force=False):
975
    """Make the device ready for I/O.
976

977
    """
978
    pass
979

    
980
  def Close(self):
981
    """Notifies that the device will no longer be used for I/O.
982

983
    """
984
    pass
985

    
986
  def Grow(self, amount, dryrun, backingstore):
987
    """Grow the logical volume.
988

989
    """
990
    base.ThrowError("Grow is not supported for PersistentBlockDev storage")
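
# Sketch (hypothetical device path, not part of the original module):
# wrapping an already existing block device node; only
# constants.BLOCKDEV_DRIVER_MANUAL is accepted as the driver.
def _ExamplePersistentBlockDevice(params):
  """Attach to /dev/sdb1 and return its (major, minor) numbers."""
  unique_id = (constants.BLOCKDEV_DRIVER_MANUAL, "/dev/sdb1")
  dev = PersistentBlockDevice(unique_id, [], 0, params)
  return (dev.major, dev.minor)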
991

    
992

    
993
class RADOSBlockDevice(base.BlockDev):
994
  """A RADOS Block Device (rbd).
995

996
  This class implements the RADOS Block Device for the backend. You need
997
  the rbd kernel driver, the RADOS Tools and a working RADOS cluster for
998
  this to be functional.
999

1000
  """
1001
  def __init__(self, unique_id, children, size, params):
1002
    """Attaches to an rbd device.
1003

1004
    """
1005
    super(RADOSBlockDevice, self).__init__(unique_id, children, size, params)
1006
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
1007
      raise ValueError("Invalid configuration data %s" % str(unique_id))
1008

    
1009
    self.driver, self.rbd_name = unique_id
1010

    
1011
    self.major = self.minor = None
1012
    self.Attach()
1013

    
1014
  @classmethod
1015
  def Create(cls, unique_id, children, size, spindles, params, excl_stor):
1016
    """Create a new rbd device.
1017

1018
    Provision a new rbd volume inside a RADOS pool.
1019

1020
    """
1021
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
1022
      raise errors.ProgrammerError("Invalid configuration data %s" %
1023
                                   str(unique_id))
1024
    if excl_stor:
1025
      raise errors.ProgrammerError("RBD device requested with"
1026
                                   " exclusive_storage")
1027
    rbd_pool = params[constants.LDP_POOL]
1028
    rbd_name = unique_id[1]
1029

    
1030
    # Provision a new rbd volume (Image) inside the RADOS cluster.
1031
    cmd = [constants.RBD_CMD, "create", "-p", rbd_pool,
1032
           rbd_name, "--size", "%s" % size]
1033
    result = utils.RunCmd(cmd)
1034
    if result.failed:
1035
      base.ThrowError("rbd creation failed (%s): %s",
1036
                      result.fail_reason, result.output)
1037

    
1038
    return RADOSBlockDevice(unique_id, children, size, params)
1039

    
1040
  def Remove(self):
1041
    """Remove the rbd device.
1042

1043
    """
1044
    rbd_pool = self.params[constants.LDP_POOL]
1045
    rbd_name = self.unique_id[1]
1046

    
1047
    if not self.minor and not self.Attach():
1048
      # The rbd device doesn't exist.
1049
      return
1050

    
1051
    # First shutdown the device (remove mappings).
1052
    self.Shutdown()
1053

    
1054
    # Remove the actual Volume (Image) from the RADOS cluster.
1055
    cmd = [constants.RBD_CMD, "rm", "-p", rbd_pool, rbd_name]
1056
    result = utils.RunCmd(cmd)
1057
    if result.failed:
1058
      base.ThrowError("Can't remove Volume from cluster with rbd rm: %s - %s",
1059
                      result.fail_reason, result.output)
1060

    
1061
  def Rename(self, new_id):
1062
    """Rename this device.
1063

1064
    """
1065
    pass
1066

    
1067
  def Attach(self):
1068
    """Attach to an existing rbd device.
1069

1070
    This method maps the rbd volume that matches our name with
1071
    an rbd device and then attaches to this device.
1072

1073
    """
1074
    self.attached = False
1075

    
1076
    # Map the rbd volume to a block device under /dev
1077
    self.dev_path = self._MapVolumeToBlockdev(self.unique_id)
1078

    
1079
    try:
1080
      st = os.stat(self.dev_path)
1081
    except OSError, err:
1082
      logging.error("Error stat()'ing %s: %s", self.dev_path, str(err))
1083
      return False
1084

    
1085
    if not stat.S_ISBLK(st.st_mode):
1086
      logging.error("%s is not a block device", self.dev_path)
1087
      return False
1088

    
1089
    self.major = os.major(st.st_rdev)
1090
    self.minor = os.minor(st.st_rdev)
1091
    self.attached = True
1092

    
1093
    return True
1094

    
1095
  def _MapVolumeToBlockdev(self, unique_id):
1096
    """Maps existing rbd volumes to block devices.
1097

1098
    This method should be idempotent if the mapping already exists.
1099

1100
    @rtype: string
1101
    @return: the block device path that corresponds to the volume
1102

1103
    """
1104
    pool = self.params[constants.LDP_POOL]
1105
    name = unique_id[1]
1106

    
1107
    # Check if the mapping already exists.
1108
    rbd_dev = self._VolumeToBlockdev(pool, name)
1109
    if rbd_dev:
1110
      # The mapping exists. Return it.
1111
      return rbd_dev
1112

    
1113
    # The mapping doesn't exist. Create it.
1114
    map_cmd = [constants.RBD_CMD, "map", "-p", pool, name]
1115
    result = utils.RunCmd(map_cmd)
1116
    if result.failed:
1117
      base.ThrowError("rbd map failed (%s): %s",
1118
                      result.fail_reason, result.output)
1119

    
1120
    # Find the corresponding rbd device.
1121
    rbd_dev = self._VolumeToBlockdev(pool, name)
1122
    if not rbd_dev:
1123
      base.ThrowError("rbd map succeeded, but could not find the rbd block"
1124
                      " device in output of showmapped, for volume: %s", name)
1125

    
1126
    # The device was successfully mapped. Return it.
1127
    return rbd_dev
1128

    
1129
  @classmethod
1130
  def _VolumeToBlockdev(cls, pool, volume_name):
1131
    """Do the 'volume name'-to-'rbd block device' resolving.
1132

1133
    @type pool: string
1134
    @param pool: RADOS pool to use
1135
    @type volume_name: string
1136
    @param volume_name: the name of the volume whose device we search for
1137
    @rtype: string or None
1138
    @return: block device path if the volume is mapped, else None
1139

1140
    """
1141
    try:
1142
      # Newer versions of the rbd tool support json output formatting. Use it
1143
      # if available.
1144
      showmap_cmd = [
1145
        constants.RBD_CMD,
1146
        "showmapped",
1147
        "-p",
1148
        pool,
1149
        "--format",
1150
        "json"
1151
        ]
1152
      result = utils.RunCmd(showmap_cmd)
1153
      if result.failed:
1154
        logging.error("rbd JSON output formatting returned error (%s): %s,"
                      " falling back to plain output parsing",
1156
                      result.fail_reason, result.output)
1157
        raise RbdShowmappedJsonError
1158

    
1159
      return cls._ParseRbdShowmappedJson(result.output, volume_name)
1160
    except RbdShowmappedJsonError:
1161
      # For older versions of rbd, we have to parse the plain / text output
1162
      # manually.
1163
      showmap_cmd = [constants.RBD_CMD, "showmapped", "-p", pool]
1164
      result = utils.RunCmd(showmap_cmd)
1165
      if result.failed:
1166
        base.ThrowError("rbd showmapped failed (%s): %s",
1167
                        result.fail_reason, result.output)
1168

    
1169
      return cls._ParseRbdShowmappedPlain(result.output, volume_name)
1170

    
1171
  @staticmethod
1172
  def _ParseRbdShowmappedJson(output, volume_name):
1173
    """Parse the json output of `rbd showmapped'.
1174

1175
    This method parses the json output of `rbd showmapped' and returns the rbd
1176
    block device path (e.g. /dev/rbd0) that matches the given rbd volume.
1177

1178
    @type output: string
1179
    @param output: the json output of `rbd showmapped'
1180
    @type volume_name: string
1181
    @param volume_name: the name of the volume whose device we search for
1182
    @rtype: string or None
1183
    @return: block device path if the volume is mapped, else None
1184

1185
    """
1186
    try:
1187
      devices = serializer.LoadJson(output)
1188
    except ValueError, err:
1189
      base.ThrowError("Unable to parse JSON data: %s" % err)
1190

    
1191
    rbd_dev = None
1192
    for d in devices.values(): # pylint: disable=E1103
1193
      try:
1194
        name = d["name"]
1195
      except KeyError:
1196
        base.ThrowError("'name' key missing from json object %s", devices)
1197

    
1198
      if name == volume_name:
1199
        if rbd_dev is not None:
1200
          base.ThrowError("rbd volume %s is mapped more than once", volume_name)
1201

    
1202
        rbd_dev = d["device"]
1203

    
1204
    return rbd_dev
1205

    
1206
  @staticmethod
1207
  def _ParseRbdShowmappedPlain(output, volume_name):
1208
    """Parse the (plain / text) output of `rbd showmapped'.
1209

1210
    This method parses the output of `rbd showmapped' and returns
1211
    the rbd block device path (e.g. /dev/rbd0) that matches the
1212
    given rbd volume.
1213

1214
    @type output: string
1215
    @param output: the plain text output of `rbd showmapped'
1216
    @type volume_name: string
1217
    @param volume_name: the name of the volume whose device we search for
1218
    @rtype: string or None
1219
    @return: block device path if the volume is mapped, else None
1220

1221
    """
1222
    allfields = 5
1223
    volumefield = 2
1224
    devicefield = 4
1225

    
1226
    lines = output.splitlines()
1227

    
1228
    # Try parsing the new output format (ceph >= 0.55).
1229
    splitted_lines = map(lambda l: l.split(), lines)
1230

    
1231
    # Check for empty output.
1232
    if not splitted_lines:
1233
      return None
1234

    
1235
    # Check showmapped output, to determine number of fields.
1236
    field_cnt = len(splitted_lines[0])
1237
    if field_cnt != allfields:
1238
      # Parsing the new format failed. Fallback to parsing the old output
1239
      # format (< 0.55).
1240
      splitted_lines = map(lambda l: l.split("\t"), lines)
      field_cnt = len(splitted_lines[0])
      if field_cnt != allfields:
        base.ThrowError("Cannot parse rbd showmapped output: expected %s"
                        " fields, found %s", allfields, field_cnt)
1244

    
1245
    matched_lines = \
1246
      filter(lambda l: len(l) == allfields and l[volumefield] == volume_name,
1247
             splitted_lines)
1248

    
1249
    if len(matched_lines) > 1:
1250
      base.ThrowError("rbd volume %s mapped more than once", volume_name)
1251

    
1252
    if matched_lines:
1253
      # rbd block device found. Return it.
1254
      rbd_dev = matched_lines[0][devicefield]
1255
      return rbd_dev
1256

    
1257
    # The given volume is not mapped.
1258
    return None
1259

    
1260
  def Assemble(self):
1261
    """Assemble the device.
1262

1263
    """
1264
    pass
1265

    
1266
  def Shutdown(self):
1267
    """Shutdown the device.
1268

1269
    """
1270
    if not self.minor and not self.Attach():
1271
      # The rbd device doesn't exist.
1272
      return
1273

    
1274
    # Unmap the block device from the Volume.
1275
    self._UnmapVolumeFromBlockdev(self.unique_id)
1276

    
1277
    self.minor = None
1278
    self.dev_path = None
1279

    
1280
  def _UnmapVolumeFromBlockdev(self, unique_id):
    """Unmaps the rbd device from the Volume it is mapped to.
1282

1283
    Unmaps the rbd device from the Volume it was previously mapped to.
1284
    This method should be idempotent if the Volume isn't mapped.
1285

1286
    """
1287
    pool = self.params[constants.LDP_POOL]
1288
    name = unique_id[1]
1289

    
1290
    # Check if the mapping already exists.
1291
    rbd_dev = self._VolumeToBlockdev(pool, name)
1292

    
1293
    if rbd_dev:
1294
      # The mapping exists. Unmap the rbd device.
1295
      unmap_cmd = [constants.RBD_CMD, "unmap", "%s" % rbd_dev]
1296
      result = utils.RunCmd(unmap_cmd)
1297
      if result.failed:
1298
        base.ThrowError("rbd unmap failed (%s): %s",
1299
                        result.fail_reason, result.output)
1300

    
1301
  def Open(self, force=False):
1302
    """Make the device ready for I/O.
1303

1304
    """
1305
    pass
1306

    
1307
  def Close(self):
1308
    """Notifies that the device will no longer be used for I/O.
1309

1310
    """
1311
    pass
1312

    
1313
  def Grow(self, amount, dryrun, backingstore):
1314
    """Grow the Volume.
1315

1316
    @type amount: integer
1317
    @param amount: the amount (in mebibytes) to grow with
1318
    @type dryrun: boolean
1319
    @param dryrun: whether to execute the operation in simulation mode
1320
        only, without actually increasing the size
1321

1322
    """
1323
    if not backingstore:
1324
      return
1325
    if not self.Attach():
1326
      base.ThrowError("Can't attach to rbd device during Grow()")
1327

    
1328
    if dryrun:
1329
      # the rbd tool does not support dry runs of resize operations.
1330
      # Since rbd volumes are thinly provisioned, we assume
1331
      # there is always enough free space for the operation.
1332
      return
1333

    
1334
    rbd_pool = self.params[constants.LDP_POOL]
1335
    rbd_name = self.unique_id[1]
1336
    new_size = self.size + amount
1337

    
1338
    # Resize the rbd volume (Image) inside the RADOS cluster.
1339
    cmd = [constants.RBD_CMD, "resize", "-p", rbd_pool,
1340
           rbd_name, "--size", "%s" % new_size]
1341
    result = utils.RunCmd(cmd)
1342
    if result.failed:
1343
      base.ThrowError("rbd resize failed (%s): %s",
1344
                      result.fail_reason, result.output)
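
# Usage sketch (hypothetical pool and volume names, not part of the original
# module): params must carry the LDP_POOL parameter naming the RADOS pool.
def _ExampleRadosBlockDevice(params):
  """Create a 4096 MiB rbd volume and return its mapped /dev path."""
  unique_id = ("rbd", "example-volume")  # (driver, volume name), illustrative
  disk = RADOSBlockDevice.Create(unique_id, [], 4096, None, params, False)
  # Create() already attached, which maps the volume via "rbd map".
  return disk.dev_path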
1345

    
1346

    
1347
class ExtStorageDevice(base.BlockDev):
1348
  """A block device provided by an ExtStorage Provider.
1349

1350
  This class implements the External Storage Interface, which means
1351
  handling of the externally provided block devices.
1352

1353
  """
1354
  def __init__(self, unique_id, children, size, params):
1355
    """Attaches to an extstorage block device.
1356

1357
    """
1358
    super(ExtStorageDevice, self).__init__(unique_id, children, size, params)
1359
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
1360
      raise ValueError("Invalid configuration data %s" % str(unique_id))
1361

    
1362
    self.driver, self.vol_name = unique_id
1363
    self.ext_params = params
1364

    
1365
    self.major = self.minor = None
1366
    self.Attach()
1367

    
1368
  @classmethod
1369
  def Create(cls, unique_id, children, size, spindles, params, excl_stor):
1370
    """Create a new extstorage device.
1371

1372
    Provision a new volume using an extstorage provider, which will
1373
    then be mapped to a block device.
1374

1375
    """
1376
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
1377
      raise errors.ProgrammerError("Invalid configuration data %s" %
1378
                                   str(unique_id))
1379
    if excl_stor:
1380
      raise errors.ProgrammerError("extstorage device requested with"
1381
                                   " exclusive_storage")
1382

    
1383
    # Call the External Storage's create script,
1384
    # to provision a new Volume inside the External Storage
1385
    _ExtStorageAction(constants.ES_ACTION_CREATE, unique_id,
1386
                      params, str(size))
1387

    
1388
    return ExtStorageDevice(unique_id, children, size, params)
1389

    
1390
  def Remove(self):
1391
    """Remove the extstorage device.
1392

1393
    """
1394
    if not self.minor and not self.Attach():
1395
      # The extstorage device doesn't exist.
1396
      return
1397

    
1398
    # First shutdown the device (remove mappings).
1399
    self.Shutdown()
1400

    
1401
    # Call the External Storage's remove script,
1402
    # to remove the Volume from the External Storage
1403
    _ExtStorageAction(constants.ES_ACTION_REMOVE, self.unique_id,
1404
                      self.ext_params)
1405

    
1406
  def Rename(self, new_id):
1407
    """Rename this device.
1408

1409
    """
1410
    pass
1411

    
1412
  def Attach(self):
1413
    """Attach to an existing extstorage device.
1414

1415
    This method maps the extstorage volume that matches our name with
1416
    a corresponding block device and then attaches to this device.
1417

1418
    """
1419
    self.attached = False
1420

    
1421
    # Call the External Storage's attach script,
1422
    # to attach an existing Volume to a block device under /dev
1423
    self.dev_path = _ExtStorageAction(constants.ES_ACTION_ATTACH,
1424
                                      self.unique_id, self.ext_params)
1425

    
1426
    try:
1427
      st = os.stat(self.dev_path)
1428
    except OSError, err:
1429
      logging.error("Error stat()'ing %s: %s", self.dev_path, str(err))
1430
      return False
1431

    
1432
    if not stat.S_ISBLK(st.st_mode):
1433
      logging.error("%s is not a block device", self.dev_path)
1434
      return False
1435

    
1436
    self.major = os.major(st.st_rdev)
1437
    self.minor = os.minor(st.st_rdev)
1438
    self.attached = True
1439

    
1440
    return True
1441

    
1442
  def Assemble(self):
1443
    """Assemble the device.
1444

1445
    """
1446
    pass
1447

    
1448
  def Shutdown(self):
1449
    """Shutdown the device.
1450

1451
    """
1452
    if not self.minor and not self.Attach():
1453
      # The extstorage device doesn't exist.
1454
      return
1455

    
1456
    # Call the External Storage's detach script,
1457
    # to detach an existing Volume from its block device under /dev
1458
    _ExtStorageAction(constants.ES_ACTION_DETACH, self.unique_id,
1459
                      self.ext_params)
1460

    
1461
    self.minor = None
1462
    self.dev_path = None
1463

    
1464
  def Open(self, force=False):
1465
    """Make the device ready for I/O.
1466

1467
    """
1468
    pass
1469

    
1470
  def Close(self):
1471
    """Notifies that the device will no longer be used for I/O.
1472

1473
    """
1474
    pass
1475

    
1476
  def Grow(self, amount, dryrun, backingstore):
1477
    """Grow the Volume.
1478

1479
    @type amount: integer
1480
    @param amount: the amount (in mebibytes) to grow with
1481
    @type dryrun: boolean
1482
    @param dryrun: whether to execute the operation in simulation mode
1483
        only, without actually increasing the size
1484

1485
    """
1486
    if not backingstore:
1487
      return
1488
    if not self.Attach():
1489
      base.ThrowError("Can't attach to extstorage device during Grow()")
1490

    
1491
    if dryrun:
1492
      # we do not support dry runs of resize operations for now.
1493
      return
1494

    
1495
    new_size = self.size + amount
1496

    
1497
    # Call the External Storage's grow script,
1498
    # to grow an existing Volume inside the External Storage
1499
    _ExtStorageAction(constants.ES_ACTION_GROW, self.unique_id,
1500
                      self.ext_params, str(self.size), grow=str(new_size))
1501

    
1502
  def SetInfo(self, text):
1503
    """Update metadata with info text.
1504

1505
    """
1506
    # Replace invalid characters
1507
    text = re.sub("^[^A-Za-z0-9_+.]", "_", text)
1508
    text = re.sub("[^-A-Za-z0-9_+.]", "_", text)
1509

    
1510
    # Only up to 128 characters are allowed
1511
    text = text[:128]
1512

    
1513
    # Call the External Storage's setinfo script,
1514
    # to set metadata for an existing Volume inside the External Storage
1515
    _ExtStorageAction(constants.ES_ACTION_SETINFO, self.unique_id,
1516
                      self.ext_params, metadata=text)
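
# Usage sketch (hypothetical provider and volume names, not part of the
# original module): every operation is delegated to the provider's scripts.
def _ExampleExtStorageDevice(params):
  """Create a 1024 MiB volume through an ExtStorage provider."""
  unique_id = ("example-provider", "example-vol")  # (provider, volume name)
  disk = ExtStorageDevice.Create(unique_id, [], 1024, None, params, False)
  disk.SetInfo("example-instance.disk0")  # forwarded to the setinfo script
  return disk.dev_path                    # filled in by the attach script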
1517

    
1518

    
1519
def _ExtStorageAction(action, unique_id, ext_params,
1520
                      size=None, grow=None, metadata=None):
1521
  """Take an External Storage action.
1522

1523
  Take an External Storage action concerning or affecting
1524
  a specific Volume inside the External Storage.
1525

1526
  @type action: string
1527
  @param action: which action to perform. One of:
1528
                 create / remove / grow / attach / detach
1529
  @type unique_id: tuple (driver, vol_name)
1530
  @param unique_id: a tuple containing the type of ExtStorage (driver)
1531
                    and the Volume name
1532
  @type ext_params: dict
1533
  @param ext_params: ExtStorage parameters
1534
  @type size: integer
1535
  @param size: the size of the Volume in mebibytes
1536
  @type grow: integer
1537
  @param grow: the new size in mebibytes (after grow)
1538
  @type metadata: string
1539
  @param metadata: metadata info of the Volume, for use by the provider
1540
  @rtype: None or a block device path (during attach)
1541

1542
  """
1543
  driver, vol_name = unique_id
1544

    
1545
  # Create an External Storage instance of type `driver'
1546
  status, inst_es = ExtStorageFromDisk(driver)
1547
  if not status:
1548
    base.ThrowError("%s" % inst_es)
1549

    
1550
  # Create the basic environment for the driver's scripts
1551
  create_env = _ExtStorageEnvironment(unique_id, ext_params, size,
1552
                                      grow, metadata)
1553

    
1554
  # Do not use log file for action `attach' as we need
1555
  # to get the output from RunResult
1556
  # TODO: find a way to have a log file for attach too
1557
  logfile = None
1558
  if action != constants.ES_ACTION_ATTACH:
1559
    logfile = _VolumeLogName(action, driver, vol_name)
1560

    
1561
  # Make sure the given action results in a valid script
1562
  if action not in constants.ES_SCRIPTS:
1563
    base.ThrowError("Action '%s' doesn't result in a valid ExtStorage script" %
1564
                    action)
1565

    
1566
  # Find out which external script to run according the given action
1567
  script_name = action + "_script"
1568
  script = getattr(inst_es, script_name)
1569

    
1570
  # Run the external script
1571
  result = utils.RunCmd([script], env=create_env,
1572
                        cwd=inst_es.path, output=logfile,)
1573
  if result.failed:
1574
    logging.error("External storage's %s command '%s' returned"
1575
                  " error: %s, logfile: %s, output: %s",
1576
                  action, result.cmd, result.fail_reason,
1577
                  logfile, result.output)
1578

    
1579
    # If logfile is 'None' (during attach), it breaks TailFile
1580
    # TODO: have a log file for attach too
1581
    if action is not constants.ES_ACTION_ATTACH:
1582
      lines = [utils.SafeEncode(val)
1583
               for val in utils.TailFile(logfile, lines=20)]
1584
    else:
1585
      lines = result.output[-20:]
1586

    
1587
    base.ThrowError("External storage's %s script failed (%s), last"
1588
                    " lines of output:\n%s",
1589
                    action, result.fail_reason, "\n".join(lines))
1590

    
1591
  if action == constants.ES_ACTION_ATTACH:
1592
    return result.stdout
1593

    
1594

    
1595
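# Illustrative sketch, not part of the original module: creating a 1 GiB
# Volume named "vol1" via a hypothetical provider "myprovider" and then
# attaching it would look roughly like
#
#   _ExtStorageAction(constants.ES_ACTION_CREATE, ("myprovider", "vol1"),
#                     {"param1": "value1"}, size="1024")
#   dev_path = _ExtStorageAction(constants.ES_ACTION_ATTACH,
#                                ("myprovider", "vol1"), {"param1": "value1"})
#
# Every action except attach logs to a file computed by _VolumeLogName();
# attach instead returns whatever path the provider's attach script prints.
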
def ExtStorageFromDisk(name, base_dir=None):
  """Create an ExtStorage instance from disk.

  This function will return an ExtStorage instance
  if the given name is a valid ExtStorage name.

  @type name: string
  @param name: the name of the ExtStorage provider
  @type base_dir: string
  @keyword base_dir: Base directory containing ExtStorage installations.
                     Defaults to a search in all the ES_SEARCH_PATH dirs.
  @rtype: tuple
  @return: True and the ExtStorage instance if we find a valid one, or
      False and the diagnostic message on error

  """
  if base_dir is None:
    es_base_dir = pathutils.ES_SEARCH_PATH
  else:
    es_base_dir = [base_dir]

  es_dir = utils.FindFile(name, es_base_dir, os.path.isdir)

  if es_dir is None:
    return False, ("Directory for External Storage Provider %s not"
                   " found in search path" % name)

  # ES Files dictionary: we will populate it with the absolute path
  # names; if the value is True, then it is a required file, otherwise
  # an optional one
  es_files = dict.fromkeys(constants.ES_SCRIPTS, True)

  es_files[constants.ES_PARAMETERS_FILE] = True

  for (filename, _) in es_files.items():
    es_files[filename] = utils.PathJoin(es_dir, filename)

    try:
      st = os.stat(es_files[filename])
    except EnvironmentError, err:
      return False, ("File '%s' under path '%s' is missing (%s)" %
                     (filename, es_dir, utils.ErrnoOrStr(err)))

    if not stat.S_ISREG(stat.S_IFMT(st.st_mode)):
      return False, ("File '%s' under path '%s' is not a regular file" %
                     (filename, es_dir))

    if filename in constants.ES_SCRIPTS:
      if stat.S_IMODE(st.st_mode) & stat.S_IXUSR != stat.S_IXUSR:
        return False, ("File '%s' under path '%s' is not executable" %
                       (filename, es_dir))

  parameters = []
  if constants.ES_PARAMETERS_FILE in es_files:
    parameters_file = es_files[constants.ES_PARAMETERS_FILE]
    try:
      parameters = utils.ReadFile(parameters_file).splitlines()
    except EnvironmentError, err:
      return False, ("Error while reading the EXT parameters file at %s: %s" %
                     (parameters_file, utils.ErrnoOrStr(err)))
    parameters = [v.split(None, 1) for v in parameters]

  es_obj = \
    objects.ExtStorage(name=name, path=es_dir,
                       create_script=es_files[constants.ES_SCRIPT_CREATE],
                       remove_script=es_files[constants.ES_SCRIPT_REMOVE],
                       grow_script=es_files[constants.ES_SCRIPT_GROW],
                       attach_script=es_files[constants.ES_SCRIPT_ATTACH],
                       detach_script=es_files[constants.ES_SCRIPT_DETACH],
                       setinfo_script=es_files[constants.ES_SCRIPT_SETINFO],
                       verify_script=es_files[constants.ES_SCRIPT_VERIFY],
                       supported_parameters=parameters)
  return True, es_obj

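# Illustrative note, not part of the original module: a provider directory
# accepted by ExtStorageFromDisk() looks roughly like
#
#   <one of pathutils.ES_SEARCH_PATH>/myprovider/
#     create  remove  grow  attach  detach  setinfo  verify  (executables)
#     parameters.list                                        (text file)
#
# "myprovider" is a made-up name; the exact required file names come from
# constants.ES_SCRIPTS and constants.ES_PARAMETERS_FILE.
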
def _ExtStorageEnvironment(unique_id, ext_params,
                           size=None, grow=None, metadata=None):
  """Calculate the environment for an External Storage script.

  @type unique_id: tuple (driver, vol_name)
  @param unique_id: ExtStorage pool and name of the Volume
  @type ext_params: dict
  @param ext_params: the EXT parameters
  @type size: string
  @param size: size of the Volume (in mebibytes)
  @type grow: string
  @param grow: new size of Volume after grow (in mebibytes)
  @type metadata: string
  @param metadata: metadata info of the Volume
  @rtype: dict
  @return: dict of environment variables

  """
  vol_name = unique_id[1]

  result = {}
  result["VOL_NAME"] = vol_name

  # EXT params
  for pname, pvalue in ext_params.items():
    result["EXTP_%s" % pname.upper()] = str(pvalue)

  if size is not None:
    result["VOL_SIZE"] = size

  if grow is not None:
    result["VOL_NEW_SIZE"] = grow

  if metadata is not None:
    result["VOL_METADATA"] = metadata

  return result

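# Illustrative note, not part of the original module: for
# unique_id=("myprovider", "vol1"), ext_params={"fast": "yes"},
# size="1024" and grow="2048", the helper above returns
#
#   {"VOL_NAME": "vol1", "EXTP_FAST": "yes",
#    "VOL_SIZE": "1024", "VOL_NEW_SIZE": "2048"}
#
# VOL_METADATA is only set when metadata is passed.
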
def _VolumeLogName(kind, es_name, volume):
  """Compute the ExtStorage log filename for a given Volume and operation.

  @type kind: string
  @param kind: the operation type (e.g. create, remove etc.)
  @type es_name: string
  @param es_name: the ExtStorage name
  @type volume: string
  @param volume: the name of the Volume inside the External Storage

  """
  # Check if the extstorage log dir is a valid dir
  if not os.path.isdir(pathutils.LOG_ES_DIR):
    base.ThrowError("Cannot find log directory: %s", pathutils.LOG_ES_DIR)

  # TODO: Use tempfile.mkstemp to create unique filename
  basename = ("%s-%s-%s-%s.log" %
              (kind, es_name, volume, utils.TimestampForFilename()))
  return utils.PathJoin(pathutils.LOG_ES_DIR, basename)

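# Illustrative note, not part of the original module: for kind="create",
# es_name="myprovider" and volume="vol1" the helper above yields a path of
# the form
#
#   <pathutils.LOG_ES_DIR>/create-myprovider-vol1-<timestamp>.log
#
# with the timestamp produced by utils.TimestampForFilename().
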
# Map of logical disk types to the device classes implementing them
DEV_MAP = {
  constants.LD_LV: LogicalVolume,
  constants.LD_DRBD8: drbd.DRBD8Dev,
  constants.LD_BLOCKDEV: PersistentBlockDevice,
  constants.LD_RBD: RADOSBlockDevice,
  constants.LD_EXT: ExtStorageDevice,
  }

if constants.ENABLE_FILE_STORAGE or constants.ENABLE_SHARED_FILE_STORAGE:
  DEV_MAP[constants.LD_FILE] = FileStorage

def _VerifyDiskType(dev_type):
  """Make sure the given block device type is known to this module.

  """
  if dev_type not in DEV_MAP:
    raise errors.ProgrammerError("Invalid block device type '%s'" % dev_type)


def _VerifyDiskParams(disk):
  """Verifies if all disk parameters are set.

  """
  missing = set(constants.DISK_LD_DEFAULTS[disk.dev_type]) - set(disk.params)
  if missing:
    raise errors.ProgrammerError("Block device is missing disk parameters: %s" %
                                 missing)

def FindDevice(disk, children):
  """Search for an existing, assembled device.

  This will succeed only if the device exists and is assembled, but it
  does not do any actions in order to activate the device.

  @type disk: L{objects.Disk}
  @param disk: the disk object to find
  @type children: list of L{bdev.BlockDev}
  @param children: the list of block devices that are children of the device
                  represented by the disk parameter

  """
  _VerifyDiskType(disk.dev_type)
  device = DEV_MAP[disk.dev_type](disk.physical_id, children, disk.size,
                                  disk.params)
  if not device.attached:
    return None
  return device

def Assemble(disk, children):
  """Try to attach or assemble an existing device.

  This will attach or assemble the device, as needed, to bring it
  fully up. It must be safe to run on already-assembled devices.

  @type disk: L{objects.Disk}
  @param disk: the disk object to assemble
  @type children: list of L{bdev.BlockDev}
  @param children: the list of block devices that are children of the device
                  represented by the disk parameter

  """
  _VerifyDiskType(disk.dev_type)
  _VerifyDiskParams(disk)
  device = DEV_MAP[disk.dev_type](disk.physical_id, children, disk.size,
                                  disk.params)
  device.Assemble()
  return device

def Create(disk, children, excl_stor):
  """Create a device.

  @type disk: L{objects.Disk}
  @param disk: the disk object to create
  @type children: list of L{bdev.BlockDev}
  @param children: the list of block devices that are children of the device
                  represented by the disk parameter
  @type excl_stor: boolean
  @param excl_stor: Whether exclusive_storage is active
  @rtype: L{bdev.BlockDev}
  @return: the created device, or C{None} in case of an error

  """
  _VerifyDiskType(disk.dev_type)
  _VerifyDiskParams(disk)
  device = DEV_MAP[disk.dev_type].Create(disk.physical_id, children, disk.size,
                                         disk.spindles, disk.params, excl_stor)
  return device
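

# Illustrative sketch, not part of the original module: callers normally go
# through the dispatch helpers above instead of the device classes directly.
# For a fully-parameterized objects.Disk instance "disk" with no children:
#
#   dev = Assemble(disk, [])      # instantiates DEV_MAP[disk.dev_type]
#   if FindDevice(disk, []) is not None:
#     logging.info("disk is attached at %s", dev.dev_path)
#
# Assemble() raises via base.ThrowError() if the device cannot be brought up,
# while FindDevice() simply returns None when the device is not attached.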