#
#

# Copyright (C) 2006, 2007, 2010, 2011, 2012 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Block device abstraction"""

import re
import time
import errno
import shlex
import stat
import pyparsing as pyp
import os
import logging

from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import objects
from ganeti import compat
from ganeti import netutils


# Size of reads in _CanReadDevice
_DEVICE_READ_SIZE = 128 * 1024


def _IgnoreError(fn, *args, **kwargs):
  """Executes the given function, ignoring BlockDeviceErrors.

  This is used in order to simplify the execution of cleanup or
  rollback functions.

  @rtype: boolean
  @return: True when fn didn't raise an exception, False otherwise

  """
  try:
    fn(*args, **kwargs)
    return True
  except errors.BlockDeviceError, err:
    logging.warning("Caught BlockDeviceError but ignoring: %s", str(err))
    return False


def _ThrowError(msg, *args):
  """Log an error to the node daemon and then raise an exception.

  @type msg: string
  @param msg: the text of the exception
  @raise errors.BlockDeviceError

  """
  if args:
    msg = msg % args
  logging.error(msg)
  raise errors.BlockDeviceError(msg)


def _CanReadDevice(path):
  """Check if we can read from the given device.

  This tries to read the first 128k of the device.

  """
  try:
    utils.ReadFile(path, size=_DEVICE_READ_SIZE)
    return True
  except EnvironmentError:
    logging.warning("Can't read from device %s", path, exc_info=True)
    return False


class BlockDev(object):
  """Block device abstract class.

  A block device can be in the following states:
    - not existing on the system, and by `Create()` it goes into:
    - existing but not setup/not active, and by `Assemble()` goes into:
    - active read-write and by `Open()` it goes into
    - online (=used, or ready for use)

  A device can also be online but read-only, however we are not using
  the readonly state (LV has it, if needed in the future) and we are
  usually looking at this like at a stack, so it's easier to
  conceptualise the transition from not-existing to online and back
  like a linear one.

  The many different states of the device are due to the fact that we
  need to cover many device types:
    - logical volumes are created, lvchange -a y $lv, and used
    - drbd devices are attached to a local disk/remote peer and made primary

  A block device is identified by three items:
    - the /dev path of the device (dynamic)
    - a unique ID of the device (static)
    - its major/minor pair (dynamic)

  Not all devices implement both the first two as distinct items. LVM
  logical volumes have their unique ID (the pair volume group, logical
  volume name) in a 1-to-1 relation to the dev path. For DRBD devices,
  the /dev path is again dynamic and the unique id is the pair (host1,
  dev1), (host2, dev2).

  You can get to a device in two ways:
    - creating the (real) device, which returns you
      an attached instance (lvcreate)
    - attaching of a python instance to an existing (real) device

  The second point, the attachment to a device, is different
  depending on whether the device is assembled or not. At init() time,
  we search for a device with the same unique_id as us. If found,
  good. It also means that the device is already assembled. If not,
  after assembly we'll have our correct major/minor.

  """
  def __init__(self, unique_id, children, size, params):
    self._children = children
    self.dev_path = None
    self.unique_id = unique_id
    self.major = None
    self.minor = None
    self.attached = False
    self.size = size
    self.params = params

  def Assemble(self):
    """Assemble the device from its components.

    Implementations of this method by child classes must ensure that:
      - after the device has been assembled, it knows its major/minor
        numbers; this allows other devices (usually parents) to probe
        correctly for their children
      - calling this method on an existing, in-use device is safe
      - if the device is already configured (and in an OK state),
        this method is idempotent

    """
    pass

  def Attach(self):
    """Find a device which matches our config and attach to it.

    """
    raise NotImplementedError

  def Close(self):
    """Notifies that the device will no longer be used for I/O.

    """
    raise NotImplementedError

  @classmethod
  def Create(cls, unique_id, children, size, params):
    """Create the device.

    If the device cannot be created, it will return None
    instead. Error messages go to the logging system.

    Note that for some devices, the unique_id is used, and for others,
    the children. The idea is that these two, taken together, are
    enough for both creation and assembly (later).

    """
    raise NotImplementedError

  def Remove(self):
    """Remove this device.

    This makes sense only for some of the device types: LV and file
    storage. Also note that if the device can't attach, the removal
    can't be completed.

    """
    raise NotImplementedError

  def Rename(self, new_id):
    """Rename this device.

    This may or may not make sense for a given device type.

    """
    raise NotImplementedError

  def Open(self, force=False):
    """Make the device ready for use.

    This makes the device ready for I/O. For now, just the DRBD
    devices need this.

    The force parameter signifies that if the device has any kind of
    --force thing, it should be used, we know what we are doing.

    """
    raise NotImplementedError

  def Shutdown(self):
    """Shut down the device, freeing its children.

    This undoes the `Assemble()` work, except for the child
    assembling; as such, the children on the device are still
    assembled after this call.

    """
    raise NotImplementedError

  def SetSyncParams(self, params):
    """Adjust the synchronization parameters of the mirror.

    In case this is not a mirroring device, this is a no-op.

    @param params: dictionary of LD level disk parameters related to the
      synchronization.
    @rtype: list
    @return: a list of error messages, emitted both by the current node and by
      children. An empty list means no errors.

    """
    result = []
    if self._children:
      for child in self._children:
        result.extend(child.SetSyncParams(params))
    return result

  def PauseResumeSync(self, pause):
    """Pause/Resume the sync of the mirror.

    In case this is not a mirroring device, this is a no-op.

    @param pause: Whether to pause or resume

    """
    result = True
    if self._children:
      for child in self._children:
        result = result and child.PauseResumeSync(pause)
    return result

  def GetSyncStatus(self):
    """Returns the sync status of the device.

    If this device is a mirroring device, this function returns the
    status of the mirror.

    If sync_percent is None, it means the device is not syncing.

    If estimated_time is None, it means we can't estimate
    the time needed, otherwise it's the time left in seconds.

    If is_degraded is True, it means the device is missing
    redundancy. This is usually a sign that something went wrong in
    the device setup, if sync_percent is None.

    The ldisk parameter represents the degradation of the local
    data. This is only valid for some devices, the rest will always
    return False (not degraded).

    @rtype: objects.BlockDevStatus

    """
    return objects.BlockDevStatus(dev_path=self.dev_path,
                                  major=self.major,
                                  minor=self.minor,
                                  sync_percent=None,
                                  estimated_time=None,
                                  is_degraded=False,
                                  ldisk_status=constants.LDS_OKAY)

  def CombinedSyncStatus(self):
    """Calculate the mirror status recursively for our children.

    The return value is the same as for `GetSyncStatus()` except the
    minimum percent and maximum time are calculated across our
    children.

    @rtype: objects.BlockDevStatus

    """
    status = self.GetSyncStatus()

    min_percent = status.sync_percent
    max_time = status.estimated_time
    is_degraded = status.is_degraded
    ldisk_status = status.ldisk_status

    if self._children:
      for child in self._children:
        child_status = child.GetSyncStatus()

        if min_percent is None:
          min_percent = child_status.sync_percent
        elif child_status.sync_percent is not None:
          min_percent = min(min_percent, child_status.sync_percent)

        if max_time is None:
          max_time = child_status.estimated_time
        elif child_status.estimated_time is not None:
          max_time = max(max_time, child_status.estimated_time)

        is_degraded = is_degraded or child_status.is_degraded

        if ldisk_status is None:
          ldisk_status = child_status.ldisk_status
        elif child_status.ldisk_status is not None:
          ldisk_status = max(ldisk_status, child_status.ldisk_status)

    return objects.BlockDevStatus(dev_path=self.dev_path,
                                  major=self.major,
                                  minor=self.minor,
                                  sync_percent=min_percent,
                                  estimated_time=max_time,
                                  is_degraded=is_degraded,
                                  ldisk_status=ldisk_status)

  def SetInfo(self, text):
    """Update metadata with info text.

    Only supported for some device types.

    """
    for child in self._children:
      child.SetInfo(text)

  def Grow(self, amount, dryrun, backingstore):
    """Grow the block device.

    @type amount: integer
    @param amount: the amount (in mebibytes) to grow with
    @type dryrun: boolean
    @param dryrun: whether to execute the operation in simulation mode
        only, without actually increasing the size
    @param backingstore: whether to execute the operation on backing storage
        only, or on "logical" storage only; e.g. DRBD is logical storage,
        whereas LVM, file, RBD are backing storage

    """
    raise NotImplementedError

  def GetActualSize(self):
    """Return the actual disk size.

    @note: the device needs to be active when this is called

    """
    assert self.attached, "BlockDevice not attached in GetActualSize()"
    result = utils.RunCmd(["blockdev", "--getsize64", self.dev_path])
    if result.failed:
      _ThrowError("blockdev failed (%s): %s",
                  result.fail_reason, result.output)
    try:
      sz = int(result.output.strip())
    except (ValueError, TypeError), err:
      _ThrowError("Failed to parse blockdev output: %s", str(err))
    return sz

  def __repr__(self):
    return ("<%s: unique_id: %s, children: %s, %s:%s, %s>" %
            (self.__class__, self.unique_id, self._children,
             self.major, self.minor, self.dev_path))
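
# Illustrative sketch (added for illustration, not part of the original
# module): how a caller could drive the lifecycle described in the BlockDev
# docstring above; the concrete class, volume names and "params" dict are
# assumptions for the example only.
#
#   dev = LogicalVolume.Create(("xenvg", "example.disk0"), [], 1024, params)
#   dev.Assemble()                      # make the kernel device available
#   dev.Open()                          # no-op for LVs, needed for DRBD
#   status = dev.CombinedSyncStatus()   # worst-case status across children
#   if status.is_degraded:
#     logging.warning("device %s is degraded", dev.dev_path)
#   dev.Shutdown()                      # undoes Assemble(); children stay up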


class LogicalVolume(BlockDev):
  """Logical Volume block device.

  """
  _VALID_NAME_RE = re.compile("^[a-zA-Z0-9+_.-]*$")
  _INVALID_NAMES = frozenset([".", "..", "snapshot", "pvmove"])
  _INVALID_SUBSTRINGS = frozenset(["_mlog", "_mimage"])

  def __init__(self, unique_id, children, size, params):
    """Attaches to a LV device.

    The unique_id is a tuple (vg_name, lv_name)

    """
    super(LogicalVolume, self).__init__(unique_id, children, size, params)
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
      raise ValueError("Invalid configuration data %s" % str(unique_id))
    self._vg_name, self._lv_name = unique_id
    self._ValidateName(self._vg_name)
    self._ValidateName(self._lv_name)
    self.dev_path = utils.PathJoin("/dev", self._vg_name, self._lv_name)
    self._degraded = True
    self.major = self.minor = self.pe_size = self.stripe_count = None
    self.Attach()

  @classmethod
  def Create(cls, unique_id, children, size, params):
    """Create a new logical volume.

    """
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
      raise errors.ProgrammerError("Invalid configuration data %s" %
                                   str(unique_id))
    vg_name, lv_name = unique_id
    cls._ValidateName(vg_name)
    cls._ValidateName(lv_name)
    pvs_info = cls.GetPVInfo([vg_name])
    if not pvs_info:
      _ThrowError("Can't compute PV info for vg %s", vg_name)
    pvs_info.sort()
    pvs_info.reverse()

    pvlist = [pv[1] for pv in pvs_info]
    if compat.any(":" in v for v in pvlist):
      _ThrowError("Some of your PVs have the invalid character ':' in their"
                  " name, this is not supported - please filter them out"
                  " in lvm.conf using either 'filter' or 'preferred_names'")
    free_size = sum([pv[0] for pv in pvs_info])
    current_pvs = len(pvlist)
    desired_stripes = params[constants.LDP_STRIPES]
    stripes = min(current_pvs, desired_stripes)
    if stripes < desired_stripes:
      logging.warning("Could not use %d stripes for VG %s, as only %d PVs are"
                      " available.", desired_stripes, vg_name, current_pvs)

    # The size constraint should have been checked from the master before
    # calling the create function.
    if free_size < size:
      _ThrowError("Not enough free space: required %s,"
                  " available %s", size, free_size)
    cmd = ["lvcreate", "-L%dm" % size, "-n%s" % lv_name]
    # If the free space is not well distributed, we won't be able to
    # create an optimally-striped volume; in that case, we want to try
    # with N, N-1, ..., 2, and finally 1 (non-striped) number of
    # stripes
    for stripes_arg in range(stripes, 0, -1):
      result = utils.RunCmd(cmd + ["-i%d" % stripes_arg] + [vg_name] + pvlist)
      if not result.failed:
        break
    if result.failed:
      _ThrowError("LV create failed (%s): %s",
                  result.fail_reason, result.output)
    return LogicalVolume(unique_id, children, size, params)

  @staticmethod
  def _GetVolumeInfo(lvm_cmd, fields):
    """Returns LVM volume information using lvm_cmd.

    @param lvm_cmd: Should be one of "pvs", "vgs" or "lvs"
    @param fields: Fields to return
    @return: A list of lists, one per line of output, holding the parsed
      field values

    """
    if not fields:
      raise errors.ProgrammerError("No fields specified")

    sep = "|"
    cmd = [lvm_cmd, "--noheadings", "--nosuffix", "--units=m", "--unbuffered",
           "--separator=%s" % sep, "-o%s" % ",".join(fields)]

    result = utils.RunCmd(cmd)
    if result.failed:
      raise errors.CommandError("Can't get the volume information: %s - %s" %
                                (result.fail_reason, result.output))

    data = []
    for line in result.stdout.splitlines():
      splitted_fields = line.strip().split(sep)

      if len(fields) != len(splitted_fields):
        raise errors.CommandError("Can't parse %s output: line '%s'" %
                                  (lvm_cmd, line))

      data.append(splitted_fields)

    return data
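
  # Illustrative sketch (not in the original source): for
  # _GetVolumeInfo("lvs", ["lv_name", "lv_size"]) the command run is roughly
  #   lvs --noheadings --nosuffix --units=m --unbuffered --separator=| \
  #       -olv_name,lv_size
  # and a sample output line "  disk0|10240.00" would be parsed into
  # ["disk0", "10240.00"]; all values come back as strings, in mebibytes.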

  @classmethod
  def GetPVInfo(cls, vg_names, filter_allocatable=True):
    """Get the free space info for PVs in a volume group.

    @param vg_names: list of volume group names, if empty all will be returned
    @param filter_allocatable: whether to skip over unallocatable PVs

    @rtype: list
    @return: list of tuples (free_space, pv_name, vg_name) with free_space in
      mebibytes

    """
    try:
      info = cls._GetVolumeInfo("pvs", ["pv_name", "vg_name", "pv_free",
                                        "pv_attr"])
    except errors.GenericError, err:
      logging.error("Can't get PV information: %s", err)
      return None

    data = []
    for pv_name, vg_name, pv_free, pv_attr in info:
      # (possibly) skip over pvs which are not allocatable
      if filter_allocatable and pv_attr[0] != "a":
        continue
      # (possibly) skip over pvs which are not in the right volume group(s)
      if vg_names and vg_name not in vg_names:
        continue
      data.append((float(pv_free), pv_name, vg_name))

    return data

  @classmethod
  def GetVGInfo(cls, vg_names, filter_readonly=True):
    """Get the free space info for specific VGs.

    @param vg_names: list of volume group names, if empty all will be returned
    @param filter_readonly: whether to skip over readonly VGs

    @rtype: list
    @return: list of tuples (free_space, total_size, name) with free_space in
      MiB

    """
    try:
      info = cls._GetVolumeInfo("vgs", ["vg_name", "vg_free", "vg_attr",
                                        "vg_size"])
    except errors.GenericError, err:
      logging.error("Can't get VG information: %s", err)
      return None

    data = []
    for vg_name, vg_free, vg_attr, vg_size in info:
      # (possibly) skip over vgs which are not writable
      if filter_readonly and vg_attr[0] == "r":
        continue
      # (possibly) skip over vgs which are not in the right volume group(s)
      if vg_names and vg_name not in vg_names:
        continue
      data.append((float(vg_free), float(vg_size), vg_name))

    return data

  @classmethod
  def _ValidateName(cls, name):
    """Validates that a given name is valid as VG or LV name.

    The list of valid characters and restricted names is taken out of
    the lvm(8) manpage, with the simplification that we enforce both
    VG and LV restrictions on the names.

    """
    if (not cls._VALID_NAME_RE.match(name) or
        name in cls._INVALID_NAMES or
        compat.any(substring in name for substring in cls._INVALID_SUBSTRINGS)):
      _ThrowError("Invalid LVM name '%s'", name)

  def Remove(self):
    """Remove this logical volume.

    """
    if not self.minor and not self.Attach():
      # the LV does not exist
      return
    result = utils.RunCmd(["lvremove", "-f", "%s/%s" %
                           (self._vg_name, self._lv_name)])
    if result.failed:
      _ThrowError("Can't lvremove: %s - %s", result.fail_reason, result.output)

  def Rename(self, new_id):
    """Rename this logical volume.

    """
    if not isinstance(new_id, (tuple, list)) or len(new_id) != 2:
      raise errors.ProgrammerError("Invalid new logical id '%s'" % new_id)
    new_vg, new_name = new_id
    if new_vg != self._vg_name:
      raise errors.ProgrammerError("Can't move a logical volume across"
                                   " volume groups (from %s to %s)" %
                                   (self._vg_name, new_vg))
    result = utils.RunCmd(["lvrename", new_vg, self._lv_name, new_name])
    if result.failed:
      _ThrowError("Failed to rename the logical volume: %s", result.output)
    self._lv_name = new_name
    self.dev_path = utils.PathJoin("/dev", self._vg_name, self._lv_name)

  def Attach(self):
    """Attach to an existing LV.

    This method will try to see if an existing and active LV exists
    which matches our name. If so, its major/minor will be
    recorded.

    """
    self.attached = False
    result = utils.RunCmd(["lvs", "--noheadings", "--separator=,",
                           "--units=m", "--nosuffix",
                           "-olv_attr,lv_kernel_major,lv_kernel_minor,"
                           "vg_extent_size,stripes", self.dev_path])
    if result.failed:
      logging.error("Can't find LV %s: %s, %s",
                    self.dev_path, result.fail_reason, result.output)
      return False
    # the output can (and will) have multiple lines for multi-segment
    # LVs, as the 'stripes' parameter is a segment one, so we take
    # only the last entry, which is the one we're interested in; note
    # that with LVM2 anyway the 'stripes' value must be constant
    # across segments, so this is a no-op actually
    out = result.stdout.splitlines()
    if not out: # totally empty result? splitlines() returns at least
                # one line for any non-empty string
      logging.error("Can't parse LVS output, no lines? Got '%s'", str(out))
      return False
    out = out[-1].strip().rstrip(",")
    out = out.split(",")
    if len(out) != 5:
      logging.error("Can't parse LVS output, len(%s) != 5", str(out))
      return False

    status, major, minor, pe_size, stripes = out
    if len(status) < 6:
      logging.error("lvs lv_attr is not at least 6 characters (%s)", status)
      return False

    try:
      major = int(major)
      minor = int(minor)
    except (TypeError, ValueError), err:
      logging.error("lvs major/minor cannot be parsed: %s", str(err))

    try:
      pe_size = int(float(pe_size))
    except (TypeError, ValueError), err:
      logging.error("Can't parse vg extent size: %s", err)
      return False

    try:
      stripes = int(stripes)
    except (TypeError, ValueError), err:
      logging.error("Can't parse the number of stripes: %s", err)
      return False

    self.major = major
    self.minor = minor
    self.pe_size = pe_size
    self.stripe_count = stripes
    self._degraded = status[0] == "v" # virtual volume, i.e. doesn't have
                                      # backing storage
    self.attached = True
    return True

  def Assemble(self):
    """Assemble the device.

    We always run `lvchange -ay` on the LV to ensure it's active before
    use, as there were cases when xenvg was not active after boot
    (also possibly after disk issues).

    """
    result = utils.RunCmd(["lvchange", "-ay", self.dev_path])
    if result.failed:
      _ThrowError("Can't activate lv %s: %s", self.dev_path, result.output)

  def Shutdown(self):
    """Shutdown the device.

    This is a no-op for the LV device type, as we don't deactivate the
    volumes on shutdown.

    """
    pass

  def GetSyncStatus(self):
    """Returns the sync status of the device.

    If this device is a mirroring device, this function returns the
    status of the mirror.

    For logical volumes, sync_percent and estimated_time are always
    None (no recovery in progress, as we don't handle the mirrored LV
    case). The is_degraded parameter is the inverse of the ldisk
    parameter.

    For the ldisk parameter, we check if the logical volume has the
    'virtual' type, which means it's not backed by existing storage
    anymore (reads from it return I/O errors). This happens after a
    physical disk failure and subsequent 'vgreduce --removemissing' on
    the volume group.

    The status was already read in Attach, so we just return it.

    @rtype: objects.BlockDevStatus

    """
    if self._degraded:
      ldisk_status = constants.LDS_FAULTY
    else:
      ldisk_status = constants.LDS_OKAY

    return objects.BlockDevStatus(dev_path=self.dev_path,
                                  major=self.major,
                                  minor=self.minor,
                                  sync_percent=None,
                                  estimated_time=None,
                                  is_degraded=self._degraded,
                                  ldisk_status=ldisk_status)

  def Open(self, force=False):
    """Make the device ready for I/O.

    This is a no-op for the LV device type.

    """
    pass

  def Close(self):
    """Notifies that the device will no longer be used for I/O.

    This is a no-op for the LV device type.

    """
    pass

  def Snapshot(self, size):
    """Create a snapshot copy of an lvm block device.

    @returns: tuple (vg, lv)

    """
    snap_name = self._lv_name + ".snap"

    # remove existing snapshot if found
    snap = LogicalVolume((self._vg_name, snap_name), None, size, self.params)
    _IgnoreError(snap.Remove)

    vg_info = self.GetVGInfo([self._vg_name])
    if not vg_info:
      _ThrowError("Can't compute VG info for vg %s", self._vg_name)
    free_size, _, _ = vg_info[0]
    if free_size < size:
      _ThrowError("Not enough free space: required %s,"
                  " available %s", size, free_size)

    result = utils.RunCmd(["lvcreate", "-L%dm" % size, "-s",
                           "-n%s" % snap_name, self.dev_path])
    if result.failed:
      _ThrowError("command: %s error: %s - %s",
                  result.cmd, result.fail_reason, result.output)

    return (self._vg_name, snap_name)
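
  # Illustrative sketch (not in the original source): Snapshot() is typically
  # driven by the export/backup path; a hypothetical caller could do
  #   vg, lv = lv_dev.Snapshot(1024)   # 1 GiB of copy-on-write space
  # and then attach a new LogicalVolume((vg, lv), ...) to read the frozen
  # data while the origin volume stays writable.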

  def SetInfo(self, text):
    """Update metadata with info text.

    """
    BlockDev.SetInfo(self, text)

    # Replace invalid characters
    text = re.sub("^[^A-Za-z0-9_+.]", "_", text)
    text = re.sub("[^-A-Za-z0-9_+.]", "_", text)

    # Only up to 128 characters are allowed
    text = text[:128]

    result = utils.RunCmd(["lvchange", "--addtag", text,
                           self.dev_path])
    if result.failed:
      _ThrowError("Command: %s error: %s - %s", result.cmd, result.fail_reason,
                  result.output)

  def Grow(self, amount, dryrun, backingstore):
    """Grow the logical volume.

    """
    if not backingstore:
      return
    if self.pe_size is None or self.stripe_count is None:
      if not self.Attach():
        _ThrowError("Can't attach to LV during Grow()")
    full_stripe_size = self.pe_size * self.stripe_count
    rest = amount % full_stripe_size
    if rest != 0:
      amount += full_stripe_size - rest
    cmd = ["lvextend", "-L", "+%dm" % amount]
    if dryrun:
      cmd.append("--test")
    # we try multiple algorithms since the 'best' ones might not have
    # space available in the right place, but later ones might (since
    # they have less constraints); also note that only recent LVM
    # supports 'cling'
    for alloc_policy in "contiguous", "cling", "normal":
      result = utils.RunCmd(cmd + ["--alloc", alloc_policy, self.dev_path])
      if not result.failed:
        return
    _ThrowError("Can't grow LV %s: %s", self.dev_path, result.output)
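
  # Worked example (added for illustration, not in the original source): with
  # pe_size = 4 MiB and stripe_count = 3, full_stripe_size is 12 MiB; a grow
  # request of 1000 MiB leaves rest = 1000 % 12 = 4, so the amount is rounded
  # up by 8 to 1008 MiB, keeping the extension a whole number of stripes.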


class DRBD8Status(object):
  """A DRBD status representation class.

  Note that this doesn't support unconfigured devices (cs:Unconfigured).

  """
  UNCONF_RE = re.compile(r"\s*[0-9]+:\s*cs:Unconfigured$")
  LINE_RE = re.compile(r"\s*[0-9]+:\s*cs:(\S+)\s+(?:st|ro):([^/]+)/(\S+)"
                       "\s+ds:([^/]+)/(\S+)\s+.*$")
  SYNC_RE = re.compile(r"^.*\ssync'ed:\s*([0-9.]+)%.*"
                       # Due to a bug in drbd in the kernel, introduced in
                       # commit 4b0715f096 (still unfixed as of 2011-08-22)
                       "(?:\s|M)"
                       "finish: ([0-9]+):([0-9]+):([0-9]+)\s.*$")

  CS_UNCONFIGURED = "Unconfigured"
  CS_STANDALONE = "StandAlone"
  CS_WFCONNECTION = "WFConnection"
  CS_WFREPORTPARAMS = "WFReportParams"
  CS_CONNECTED = "Connected"
  CS_STARTINGSYNCS = "StartingSyncS"
  CS_STARTINGSYNCT = "StartingSyncT"
  CS_WFBITMAPS = "WFBitMapS"
  CS_WFBITMAPT = "WFBitMapT"
  CS_WFSYNCUUID = "WFSyncUUID"
  CS_SYNCSOURCE = "SyncSource"
  CS_SYNCTARGET = "SyncTarget"
  CS_PAUSEDSYNCS = "PausedSyncS"
  CS_PAUSEDSYNCT = "PausedSyncT"
  CSET_SYNC = frozenset([
    CS_WFREPORTPARAMS,
    CS_STARTINGSYNCS,
    CS_STARTINGSYNCT,
    CS_WFBITMAPS,
    CS_WFBITMAPT,
    CS_WFSYNCUUID,
    CS_SYNCSOURCE,
    CS_SYNCTARGET,
    CS_PAUSEDSYNCS,
    CS_PAUSEDSYNCT,
    ])

  DS_DISKLESS = "Diskless"
  DS_ATTACHING = "Attaching" # transient state
  DS_FAILED = "Failed" # transient state, next: diskless
  DS_NEGOTIATING = "Negotiating" # transient state
  DS_INCONSISTENT = "Inconsistent" # while syncing or after creation
  DS_OUTDATED = "Outdated"
  DS_DUNKNOWN = "DUnknown" # shown for peer disk when not connected
  DS_CONSISTENT = "Consistent"
  DS_UPTODATE = "UpToDate" # normal state

  RO_PRIMARY = "Primary"
  RO_SECONDARY = "Secondary"
  RO_UNKNOWN = "Unknown"

  def __init__(self, procline):
    u = self.UNCONF_RE.match(procline)
    if u:
      self.cstatus = self.CS_UNCONFIGURED
      self.lrole = self.rrole = self.ldisk = self.rdisk = None
    else:
      m = self.LINE_RE.match(procline)
      if not m:
        raise errors.BlockDeviceError("Can't parse input data '%s'" % procline)
      self.cstatus = m.group(1)
      self.lrole = m.group(2)
      self.rrole = m.group(3)
      self.ldisk = m.group(4)
      self.rdisk = m.group(5)

    # end reading of data from the LINE_RE or UNCONF_RE

    self.is_standalone = self.cstatus == self.CS_STANDALONE
    self.is_wfconn = self.cstatus == self.CS_WFCONNECTION
    self.is_connected = self.cstatus == self.CS_CONNECTED
    self.is_primary = self.lrole == self.RO_PRIMARY
    self.is_secondary = self.lrole == self.RO_SECONDARY
    self.peer_primary = self.rrole == self.RO_PRIMARY
    self.peer_secondary = self.rrole == self.RO_SECONDARY
    self.both_primary = self.is_primary and self.peer_primary
    self.both_secondary = self.is_secondary and self.peer_secondary

    self.is_diskless = self.ldisk == self.DS_DISKLESS
    self.is_disk_uptodate = self.ldisk == self.DS_UPTODATE

    self.is_in_resync = self.cstatus in self.CSET_SYNC
    self.is_in_use = self.cstatus != self.CS_UNCONFIGURED

    m = self.SYNC_RE.match(procline)
    if m:
      self.sync_percent = float(m.group(1))
      hours = int(m.group(2))
      minutes = int(m.group(3))
      seconds = int(m.group(4))
      self.est_time = hours * 3600 + minutes * 60 + seconds
    else:
      # we have (in this if branch) no percent information, but if
      # we're resyncing we need to 'fake' a sync percent information,
      # as this is how cmdlib determines if it makes sense to wait for
      # resyncing or not
      if self.is_in_resync:
        self.sync_percent = 0
      else:
        self.sync_percent = None
      self.est_time = None
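
  # Illustrative sketch (not in the original source): a joined /proc/drbd
  # line such as
  #   "0: cs:SyncTarget ro:Secondary/Primary ds:Inconsistent/UpToDate ...
  #    ... sync'ed: 42.1% ... finish: 0:05:30 ..."
  # would yield a DRBD8Status with cstatus == CS_SYNCTARGET, lrole
  # "Secondary", sync_percent == 42.1 and est_time == 330 seconds.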


class BaseDRBD(BlockDev): # pylint: disable=W0223
  """Base DRBD class.

  This class contains a few bits of common functionality between the
  0.7 and 8.x versions of DRBD.

  """
  _VERSION_RE = re.compile(r"^version: (\d+)\.(\d+)\.(\d+)(?:\.\d+)?"
                           r" \(api:(\d+)/proto:(\d+)(?:-(\d+))?\)")
  _VALID_LINE_RE = re.compile("^ *([0-9]+): cs:([^ ]+).*$")
  _UNUSED_LINE_RE = re.compile("^ *([0-9]+): cs:Unconfigured$")

  _DRBD_MAJOR = 147
  _ST_UNCONFIGURED = "Unconfigured"
  _ST_WFCONNECTION = "WFConnection"
  _ST_CONNECTED = "Connected"

  _STATUS_FILE = "/proc/drbd"
  _USERMODE_HELPER_FILE = "/sys/module/drbd/parameters/usermode_helper"

  @staticmethod
  def _GetProcData(filename=_STATUS_FILE):
    """Return data from /proc/drbd.

    """
    try:
      data = utils.ReadFile(filename).splitlines()
    except EnvironmentError, err:
      if err.errno == errno.ENOENT:
        _ThrowError("The file %s cannot be opened, check if the module"
                    " is loaded (%s)", filename, str(err))
      else:
        _ThrowError("Can't read the DRBD proc file %s: %s", filename, str(err))
    if not data:
      _ThrowError("Can't read any data from %s", filename)
    return data

  @classmethod
  def _MassageProcData(cls, data):
    """Transform the output of _GetProcData into a nicer form.

    @return: a dictionary of minor: joined lines from /proc/drbd
        for that minor

    """
    results = {}
    old_minor = old_line = None
    for line in data:
      if not line: # completely empty lines, as can be returned by drbd8.0+
        continue
      lresult = cls._VALID_LINE_RE.match(line)
      if lresult is not None:
        if old_minor is not None:
          results[old_minor] = old_line
        old_minor = int(lresult.group(1))
        old_line = line
      else:
        if old_minor is not None:
          old_line += " " + line.strip()
    # add last line
    if old_minor is not None:
      results[old_minor] = old_line
    return results
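
  # Illustrative sketch (not in the original source): given /proc/drbd lines
  # like
  #   " 0: cs:Connected ro:Primary/Secondary ds:UpToDate/UpToDate C r----"
  #   "    ns:12345 nr:0 dw:12345 dr:6789 ..."
  # _MassageProcData joins the continuation line onto the device line and
  # returns a dict mapping minor 0 to the two lines merged into one string.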

  @classmethod
  def _GetVersion(cls, proc_data):
    """Return the DRBD version.

    This will return a dict with keys:
      - k_major
      - k_minor
      - k_point
      - api
      - proto
      - proto2 (only on drbd > 8.2.X)

    """
    first_line = proc_data[0].strip()
    version = cls._VERSION_RE.match(first_line)
    if not version:
      raise errors.BlockDeviceError("Can't parse DRBD version from '%s'" %
                                    first_line)

    values = version.groups()
    retval = {"k_major": int(values[0]),
              "k_minor": int(values[1]),
              "k_point": int(values[2]),
              "api": int(values[3]),
              "proto": int(values[4]),
             }
    if values[5] is not None:
      retval["proto2"] = values[5]

    return retval

  @staticmethod
  def GetUsermodeHelper(filename=_USERMODE_HELPER_FILE):
    """Returns DRBD usermode_helper currently set.

    """
    try:
      helper = utils.ReadFile(filename).splitlines()[0]
    except EnvironmentError, err:
      if err.errno == errno.ENOENT:
        _ThrowError("The file %s cannot be opened, check if the module"
                    " is loaded (%s)", filename, str(err))
      else:
        _ThrowError("Can't read DRBD helper file %s: %s", filename, str(err))
    if not helper:
      _ThrowError("Can't read any data from %s", filename)
    return helper

  @staticmethod
  def _DevPath(minor):
    """Return the path to a drbd device for a given minor.

    """
    return "/dev/drbd%d" % minor

  @classmethod
  def GetUsedDevs(cls):
    """Compute the list of used DRBD devices.

    """
    data = cls._GetProcData()

    used_devs = {}
    for line in data:
      match = cls._VALID_LINE_RE.match(line)
      if not match:
        continue
      minor = int(match.group(1))
      state = match.group(2)
      if state == cls._ST_UNCONFIGURED:
        continue
      used_devs[minor] = state, line

    return used_devs

  def _SetFromMinor(self, minor):
    """Set our parameters based on the given minor.

    This sets our minor variable and our dev_path.

    """
    if minor is None:
      self.minor = self.dev_path = None
      self.attached = False
    else:
      self.minor = minor
      self.dev_path = self._DevPath(minor)
      self.attached = True

  @staticmethod
  def _CheckMetaSize(meta_device):
    """Check if the given meta device looks like a valid one.

    This currently only checks the size, which must be around
    128MiB.

    """
    result = utils.RunCmd(["blockdev", "--getsize", meta_device])
    if result.failed:
      _ThrowError("Failed to get device size: %s - %s",
                  result.fail_reason, result.output)
    try:
      sectors = int(result.stdout)
    except (TypeError, ValueError):
      _ThrowError("Invalid output from blockdev: '%s'", result.stdout)
    num_bytes = sectors * 512
    if num_bytes < 128 * 1024 * 1024: # less than 128MiB
      _ThrowError("Meta device too small (%.2fMiB)", (num_bytes / 1024 / 1024))
    # the maximum *valid* size of the meta device when living on top
    # of LVM is hard to compute: it depends on the number of stripes
    # and the PE size; e.g. a 2-stripe, 64MB PE will result in a 128MB
    # (normal size), but an eight-stripe 128MB PE will result in a 1GB
    # size meta device; as such, we restrict it to 1GB (a little bit
    # too generous, but making assumptions about PE size is hard)
    if num_bytes > 1024 * 1024 * 1024:
      _ThrowError("Meta device too big (%.2fMiB)", (num_bytes / 1024 / 1024))

  def Rename(self, new_id):
    """Rename a device.

    This is not supported for drbd devices.

    """
    raise errors.ProgrammerError("Can't rename a drbd device")


class DRBD8(BaseDRBD):
  """DRBD v8.x block device.

  This implements the local host part of the DRBD device, i.e. it
  doesn't do anything to the supposed peer. If you need a fully
  connected DRBD pair, you need to use this class on both hosts.

  The unique_id for the drbd device is the (local_ip, local_port,
  remote_ip, remote_port) tuple, and it must have two children: the
  data device and the meta_device. The meta device is checked for
  valid size and is zeroed on create.

  """
  _MAX_MINORS = 255
  _PARSE_SHOW = None

  # timeout constants
  _NET_RECONFIG_TIMEOUT = 60

  # command line options for barriers
  _DISABLE_DISK_OPTION = "--no-disk-barrier" # -a
  _DISABLE_DRAIN_OPTION = "--no-disk-drain" # -D
  _DISABLE_FLUSH_OPTION = "--no-disk-flushes" # -i
  _DISABLE_META_FLUSH_OPTION = "--no-md-flushes" # -m

  def __init__(self, unique_id, children, size, params):
    if children and children.count(None) > 0:
      children = []
    if len(children) not in (0, 2):
      raise ValueError("Invalid configuration data %s" % str(children))
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 6:
      raise ValueError("Invalid configuration data %s" % str(unique_id))
    (self._lhost, self._lport,
     self._rhost, self._rport,
     self._aminor, self._secret) = unique_id
    if children:
      if not _CanReadDevice(children[1].dev_path):
        logging.info("drbd%s: Ignoring unreadable meta device", self._aminor)
        children = []
    super(DRBD8, self).__init__(unique_id, children, size, params)
    self.major = self._DRBD_MAJOR
    version = self._GetVersion(self._GetProcData())
    if version["k_major"] != 8:
      _ThrowError("Mismatch in DRBD kernel version and requested ganeti"
                  " usage: kernel is %s.%s, ganeti wants 8.x",
                  version["k_major"], version["k_minor"])

    if (self._lhost is not None and self._lhost == self._rhost and
        self._lport == self._rport):
      raise ValueError("Invalid configuration data, same local/remote %s" %
                       (unique_id,))
    self.Attach()

  @classmethod
  def _InitMeta(cls, minor, dev_path):
    """Initialize a meta device.

    This will not work if the given minor is in use.

    """
    # Zero the metadata first, in order to make sure drbdmeta doesn't
    # try to auto-detect existing filesystems or similar (see
    # http://code.google.com/p/ganeti/issues/detail?id=182); we only
    # care about the first 128MB of data in the device, even though it
    # can be bigger
    result = utils.RunCmd([constants.DD_CMD,
                           "if=/dev/zero", "of=%s" % dev_path,
                           "bs=1048576", "count=128", "oflag=direct"])
    if result.failed:
      _ThrowError("Can't wipe the meta device: %s", result.output)

    result = utils.RunCmd(["drbdmeta", "--force", cls._DevPath(minor),
                           "v08", dev_path, "0", "create-md"])
    if result.failed:
      _ThrowError("Can't initialize meta device: %s", result.output)

  @classmethod
  def _FindUnusedMinor(cls):
    """Find an unused DRBD device.

    This is specific to 8.x as the minors are allocated dynamically,
    so non-existing numbers up to a max minor count are actually free.

    """
    data = cls._GetProcData()

    highest = None
    for line in data:
      match = cls._UNUSED_LINE_RE.match(line)
      if match:
        return int(match.group(1))
      match = cls._VALID_LINE_RE.match(line)
      if match:
        minor = int(match.group(1))
        highest = max(highest, minor)
    if highest is None: # there are no minors in use at all
      return 0
    if highest >= cls._MAX_MINORS:
      logging.error("Error: no free drbd minors!")
      raise errors.BlockDeviceError("Can't find a free DRBD minor")
    return highest + 1

  @classmethod
  def _GetShowParser(cls):
    """Return a parser for `drbd show` output.

    This will either create or return an already-created parser for the
    output of the command `drbd show`.

    """
    if cls._PARSE_SHOW is not None:
      return cls._PARSE_SHOW

    # pyparsing setup
    lbrace = pyp.Literal("{").suppress()
    rbrace = pyp.Literal("}").suppress()
    lbracket = pyp.Literal("[").suppress()
    rbracket = pyp.Literal("]").suppress()
    semi = pyp.Literal(";").suppress()
    colon = pyp.Literal(":").suppress()
    # this also converts the value to an int
    number = pyp.Word(pyp.nums).setParseAction(lambda s, l, t: int(t[0]))

    comment = pyp.Literal("#") + pyp.Optional(pyp.restOfLine)
    defa = pyp.Literal("_is_default").suppress()
    dbl_quote = pyp.Literal('"').suppress()

    keyword = pyp.Word(pyp.alphanums + "-")

    # value types
    value = pyp.Word(pyp.alphanums + "_-/.:")
    quoted = dbl_quote + pyp.CharsNotIn('"') + dbl_quote
    ipv4_addr = (pyp.Optional(pyp.Literal("ipv4")).suppress() +
                 pyp.Word(pyp.nums + ".") + colon + number)
    ipv6_addr = (pyp.Optional(pyp.Literal("ipv6")).suppress() +
                 pyp.Optional(lbracket) + pyp.Word(pyp.hexnums + ":") +
                 pyp.Optional(rbracket) + colon + number)
    # meta device, extended syntax
    meta_value = ((value ^ quoted) + lbracket + number + rbracket)
    # device name, extended syntax
    device_value = pyp.Literal("minor").suppress() + number

    # a statement
    stmt = (~rbrace + keyword + ~lbrace +
            pyp.Optional(ipv4_addr ^ ipv6_addr ^ value ^ quoted ^ meta_value ^
                         device_value) +
            pyp.Optional(defa) + semi +
            pyp.Optional(pyp.restOfLine).suppress())

    # an entire section
    section_name = pyp.Word(pyp.alphas + "_")
    section = section_name + lbrace + pyp.ZeroOrMore(pyp.Group(stmt)) + rbrace

    bnf = pyp.ZeroOrMore(pyp.Group(section ^ stmt))
    bnf.ignore(comment)

    cls._PARSE_SHOW = bnf

    return bnf

  @classmethod
  def _GetShowData(cls, minor):
    """Return the `drbdsetup show` data for a minor.

    """
    result = utils.RunCmd(["drbdsetup", cls._DevPath(minor), "show"])
    if result.failed:
      logging.error("Can't display the drbd config: %s - %s",
                    result.fail_reason, result.output)
      return None
    return result.stdout

  @classmethod
  def _GetDevInfo(cls, out):
    """Parse details about a given DRBD minor.

    This returns, if available, the local backing device (as a path)
    and the local and remote (ip, port) information from a string
    containing the output of the `drbdsetup show` command as returned
    by _GetShowData.

    """
    data = {}
    if not out:
      return data

    bnf = cls._GetShowParser()
    # run pyparse

    try:
      results = bnf.parseString(out)
    except pyp.ParseException, err:
      _ThrowError("Can't parse drbdsetup show output: %s", str(err))

    # and massage the results into our desired format
    for section in results:
      sname = section[0]
      if sname == "_this_host":
        for lst in section[1:]:
          if lst[0] == "disk":
            data["local_dev"] = lst[1]
          elif lst[0] == "meta-disk":
            data["meta_dev"] = lst[1]
            data["meta_index"] = lst[2]
          elif lst[0] == "address":
            data["local_addr"] = tuple(lst[1:])
      elif sname == "_remote_host":
        for lst in section[1:]:
          if lst[0] == "address":
            data["remote_addr"] = tuple(lst[1:])
    return data
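
  # Illustrative sketch (not in the original source): for a typical attached
  # and connected minor, _GetDevInfo() would return something like
  #   {"local_dev": "/dev/xenvg/inst.data",
  #    "meta_dev": "/dev/xenvg/inst.meta", "meta_index": 0,
  #    "local_addr": ("192.0.2.1", 11000),
  #    "remote_addr": ("192.0.2.2", 11000)}
  # (the device paths and addresses are made up); _MatchesLocal() and
  # _MatchesNet() below compare such a dict against our own configuration.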

  def _MatchesLocal(self, info):
    """Test if our local config matches with an existing device.

    The parameter should be as returned from `_GetDevInfo()`. This
    method tests if our local backing device is the same as the one in
    the info parameter, in effect testing if we look like the given
    device.

    """
    if self._children:
      backend, meta = self._children
    else:
      backend = meta = None

    if backend is not None:
      retval = ("local_dev" in info and info["local_dev"] == backend.dev_path)
    else:
      retval = ("local_dev" not in info)

    if meta is not None:
      retval = retval and ("meta_dev" in info and
                           info["meta_dev"] == meta.dev_path)
      retval = retval and ("meta_index" in info and
                           info["meta_index"] == 0)
    else:
      retval = retval and ("meta_dev" not in info and
                           "meta_index" not in info)
    return retval

  def _MatchesNet(self, info):
    """Test if our network config matches with an existing device.

    The parameter should be as returned from `_GetDevInfo()`. This
    method tests if our network configuration is the same as the one
    in the info parameter, in effect testing if we look like the given
    device.

    """
    if (((self._lhost is None and not ("local_addr" in info)) and
         (self._rhost is None and not ("remote_addr" in info)))):
      return True

    if self._lhost is None:
      return False

    if not ("local_addr" in info and
            "remote_addr" in info):
      return False

    retval = (info["local_addr"] == (self._lhost, self._lport))
    retval = (retval and
              info["remote_addr"] == (self._rhost, self._rport))
    return retval

  def _AssembleLocal(self, minor, backend, meta, size):
    """Configure the local part of a DRBD device.

    """
    args = ["drbdsetup", self._DevPath(minor), "disk",
            backend, meta, "0",
            "-e", "detach",
            "--create-device"]
    if size:
      args.extend(["-d", "%sm" % size])

    version = self._GetVersion(self._GetProcData())
    vmaj = version["k_major"]
    vmin = version["k_minor"]
    vrel = version["k_point"]

    barrier_args = \
      self._ComputeDiskBarrierArgs(vmaj, vmin, vrel,
                                   self.params[constants.LDP_BARRIERS],
                                   self.params[constants.LDP_NO_META_FLUSH])
    args.extend(barrier_args)

    if self.params[constants.LDP_DISK_CUSTOM]:
      args.extend(shlex.split(self.params[constants.LDP_DISK_CUSTOM]))

    result = utils.RunCmd(args)
    if result.failed:
      _ThrowError("drbd%d: can't attach local disk: %s", minor, result.output)

  @classmethod
  def _ComputeDiskBarrierArgs(cls, vmaj, vmin, vrel, disabled_barriers,
                              disable_meta_flush):
    """Compute the DRBD command line parameters for disk barriers.

    Returns a list of the disk barrier parameters as requested via the
    disabled_barriers and disable_meta_flush arguments, and according to the
    supported ones in the DRBD version vmaj.vmin.vrel

    If the desired option is unsupported, raises errors.BlockDeviceError.

    """
    disabled_barriers_set = frozenset(disabled_barriers)
    if not disabled_barriers_set in constants.DRBD_VALID_BARRIER_OPT:
      raise errors.BlockDeviceError("%s is not a valid option set for DRBD"
                                    " barriers" % disabled_barriers)

    args = []

    # The following code assumes DRBD 8.x, with x < 4 and x != 1 (DRBD 8.1.x
    # does not exist)
    if not (vmaj == 8 and vmin in (0, 2, 3)):
      raise errors.BlockDeviceError("Unsupported DRBD version: %d.%d.%d" %
                                    (vmaj, vmin, vrel))

    def _AppendOrRaise(option, min_version):
      """Helper for DRBD options"""
      if min_version is not None and vrel >= min_version:
        args.append(option)
      else:
        raise errors.BlockDeviceError("Could not use the option %s as the"
                                      " DRBD version %d.%d.%d does not support"
                                      " it." % (option, vmaj, vmin, vrel))

    # the minimum version for each feature is encoded via pairs of (minor
    # version -> x) where x is version in which support for the option was
    # introduced.
    meta_flush_supported = disk_flush_supported = {
      0: 12,
      2: 7,
      3: 0,
      }

    disk_drain_supported = {
      2: 7,
      3: 0,
      }

    disk_barriers_supported = {
      3: 0,
      }

    # meta flushes
    if disable_meta_flush:
      _AppendOrRaise(cls._DISABLE_META_FLUSH_OPTION,
                     meta_flush_supported.get(vmin, None))

    # disk flushes
    if constants.DRBD_B_DISK_FLUSH in disabled_barriers_set:
      _AppendOrRaise(cls._DISABLE_FLUSH_OPTION,
                     disk_flush_supported.get(vmin, None))

    # disk drain
    if constants.DRBD_B_DISK_DRAIN in disabled_barriers_set:
      _AppendOrRaise(cls._DISABLE_DRAIN_OPTION,
                     disk_drain_supported.get(vmin, None))

    # disk barriers
    if constants.DRBD_B_DISK_BARRIERS in disabled_barriers_set:
      _AppendOrRaise(cls._DISABLE_DISK_OPTION,
                     disk_barriers_supported.get(vmin, None))

    return args
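
  # Worked example (added for illustration, not in the original source): on
  # DRBD 8.3.x with disable_meta_flush=True and disabled_barriers containing
  # the disk-flush and disk-drain constants (assuming that combination is
  # listed in constants.DRBD_VALID_BARRIER_OPT), every option is supported
  # (the minimum point release for minor 3 is 0), so the returned list is
  # ["--no-md-flushes", "--no-disk-flushes", "--no-disk-drain"].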

  def _AssembleNet(self, minor, net_info, protocol,
                   dual_pri=False, hmac=None, secret=None):
    """Configure the network part of the device.

    """
    lhost, lport, rhost, rport = net_info
    if None in net_info:
      # we don't want a network connection and actually want to make
      # sure it's shut down
      self._ShutdownNet(minor)
      return

    # Workaround for a race condition. When DRBD is doing its dance to
    # establish a connection with its peer, it also sends the
    # synchronization speed over the wire. In some cases setting the
    # sync speed only after setting up both sides can race with DRBD
    # connecting, hence we set it here before telling DRBD anything
    # about its peer.
    sync_errors = self._SetMinorSyncParams(minor, self.params)
    if sync_errors:
      _ThrowError("drbd%d: can't set the synchronization parameters: %s" %
                  (minor, utils.CommaJoin(sync_errors)))

    if netutils.IP6Address.IsValid(lhost):
      if not netutils.IP6Address.IsValid(rhost):
        _ThrowError("drbd%d: can't connect ip %s to ip %s" %
                    (minor, lhost, rhost))
      family = "ipv6"
    elif netutils.IP4Address.IsValid(lhost):
      if not netutils.IP4Address.IsValid(rhost):
        _ThrowError("drbd%d: can't connect ip %s to ip %s" %
                    (minor, lhost, rhost))
      family = "ipv4"
    else:
      _ThrowError("drbd%d: Invalid ip %s" % (minor, lhost))

    args = ["drbdsetup", self._DevPath(minor), "net",
            "%s:%s:%s" % (family, lhost, lport),
            "%s:%s:%s" % (family, rhost, rport), protocol,
            "-A", "discard-zero-changes",
            "-B", "consensus",
            "--create-device",
            ]
    if dual_pri:
      args.append("-m")
    if hmac and secret:
      args.extend(["-a", hmac, "-x", secret])

    if self.params[constants.LDP_NET_CUSTOM]:
      args.extend(shlex.split(self.params[constants.LDP_NET_CUSTOM]))

    result = utils.RunCmd(args)
    if result.failed:
      _ThrowError("drbd%d: can't setup network: %s - %s",
                  minor, result.fail_reason, result.output)

    def _CheckNetworkConfig():
      info = self._GetDevInfo(self._GetShowData(minor))
      if not "local_addr" in info or not "remote_addr" in info:
        raise utils.RetryAgain()

      if (info["local_addr"] != (lhost, lport) or
          info["remote_addr"] != (rhost, rport)):
        raise utils.RetryAgain()

    try:
      utils.Retry(_CheckNetworkConfig, 1.0, 10.0)
    except utils.RetryTimeout:
      _ThrowError("drbd%d: timeout while configuring network", minor)

  def AddChildren(self, devices):
    """Add a disk to the DRBD device.

    """
    if self.minor is None:
      _ThrowError("drbd%d: can't attach to drbd8 during AddChildren",
                  self._aminor)
    if len(devices) != 2:
      _ThrowError("drbd%d: need two devices for AddChildren", self.minor)
    info = self._GetDevInfo(self._GetShowData(self.minor))
    if "local_dev" in info:
      _ThrowError("drbd%d: already attached to a local disk", self.minor)
    backend, meta = devices
    if backend.dev_path is None or meta.dev_path is None:
      _ThrowError("drbd%d: children not ready during AddChildren", self.minor)
    backend.Open()
    meta.Open()
    self._CheckMetaSize(meta.dev_path)
    self._InitMeta(self._FindUnusedMinor(), meta.dev_path)

    self._AssembleLocal(self.minor, backend.dev_path, meta.dev_path, self.size)
    self._children = devices

  def RemoveChildren(self, devices):
    """Detach the drbd device from local storage.

    """
    if self.minor is None:
      _ThrowError("drbd%d: can't attach to drbd8 during RemoveChildren",
                  self._aminor)
    # early return if we don't actually have backing storage
    info = self._GetDevInfo(self._GetShowData(self.minor))
    if "local_dev" not in info:
      return
    if len(self._children) != 2:
      _ThrowError("drbd%d: we don't have two children: %s", self.minor,
                  self._children)
    if self._children.count(None) == 2: # we don't actually have children :)
      logging.warning("drbd%d: requested detach while detached", self.minor)
      return
    if len(devices) != 2:
      _ThrowError("drbd%d: we need two children in RemoveChildren", self.minor)
    for child, dev in zip(self._children, devices):
      if dev != child.dev_path:
        _ThrowError("drbd%d: mismatch in local storage (%s != %s) in"
                    " RemoveChildren", self.minor, dev, child.dev_path)

    self._ShutdownLocal(self.minor)
    self._children = []

  @classmethod
  def _SetMinorSyncParams(cls, minor, params):
    """Set the parameters of the DRBD syncer.

    This is the low-level implementation.

    @type minor: int
    @param minor: the drbd minor whose settings we change
    @type params: dict
    @param params: LD level disk parameters related to the synchronization
    @rtype: list
    @return: a list of error messages

    """

    args = ["drbdsetup", cls._DevPath(minor), "syncer"]
    if params[constants.LDP_DYNAMIC_RESYNC]:
      version = cls._GetVersion(cls._GetProcData())
      vmin = version["k_minor"]
      vrel = version["k_point"]

      # By definition we are using 8.x, so just check the rest of the version
      # number
      if vmin != 3 or vrel < 9:
        msg = ("The current DRBD version (8.%d.%d) does not support the "
               "dynamic resync speed controller" % (vmin, vrel))
        logging.error(msg)
        return [msg]

      if params[constants.LDP_PLAN_AHEAD] == 0:
        msg = ("A value of 0 for c-plan-ahead disables the dynamic sync speed"
               " controller at DRBD level. If you want to disable it, please"
               " set the dynamic-resync disk parameter to False.")
        logging.error(msg)
        return [msg]

      # add the c-* parameters to args
      args.extend(["--c-plan-ahead", params[constants.LDP_PLAN_AHEAD],
                   "--c-fill-target", params[constants.LDP_FILL_TARGET],
                   "--c-delay-target", params[constants.LDP_DELAY_TARGET],
                   "--c-max-rate", params[constants.LDP_MAX_RATE],
                   "--c-min-rate", params[constants.LDP_MIN_RATE],
                  ])

    else:
      args.extend(["-r", "%d" % params[constants.LDP_RESYNC_RATE]])
1634 |
|
1635 |
args.append("--create-device")
|
1636 |
result = utils.RunCmd(args) |
1637 |
if result.failed:
|
1638 |
msg = ("Can't change syncer rate: %s - %s" %
|
1639 |
(result.fail_reason, result.output)) |
1640 |
logging.error(msg) |
1641 |
return [msg]
|
1642 |
|
1643 |
return []
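# Illustrative sketch (the numeric values are hypothetical, not defaults taken
# from this module): with dynamic resync enabled the syncer command built
# above would resemble
#   drbdsetup /dev/drbd0 syncer --c-plan-ahead 20 --c-fill-target 0 \
#     --c-delay-target 1 --c-max-rate 61440 --c-min-rate 4096 --create-device
# while the static branch reduces to
#   drbdsetup /dev/drbd0 syncer -r <resync-rate> --create-device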
|
1644 |
|
1645 |
def SetSyncParams(self, params): |
1646 |
"""Set the synchronization parameters of the DRBD syncer.
|
1647 |
|
1648 |
@type params: dict
|
1649 |
@param params: LD level disk parameters related to the synchronization
|
1650 |
@rtype: list
|
1651 |
@return: a list of error messages, emitted both by the current node and by
|
1652 |
children. An empty list means no errors
|
1653 |
|
1654 |
"""
|
1655 |
if self.minor is None: |
1656 |
err = "Not attached during SetSyncParams"
|
1657 |
logging.info(err) |
1658 |
return [err]
|
1659 |
|
1660 |
children_result = super(DRBD8, self).SetSyncParams(params) |
1661 |
children_result.extend(self._SetMinorSyncParams(self.minor, params)) |
1662 |
return children_result
|
1663 |
|
1664 |
def PauseResumeSync(self, pause): |
1665 |
"""Pauses or resumes the sync of a DRBD device.
|
1666 |
|
1667 |
@param pause: Whether to pause or resume
|
1668 |
@return: the success of the operation
|
1669 |
|
1670 |
"""
|
1671 |
if self.minor is None: |
1672 |
logging.info("Not attached during PauseSync")
|
1673 |
return False |
1674 |
|
1675 |
children_result = super(DRBD8, self).PauseResumeSync(pause) |
1676 |
|
1677 |
if pause:
|
1678 |
cmd = "pause-sync"
|
1679 |
else:
|
1680 |
cmd = "resume-sync"
|
1681 |
|
1682 |
result = utils.RunCmd(["drbdsetup", self.dev_path, cmd]) |
1683 |
if result.failed:
|
1684 |
logging.error("Can't %s: %s - %s", cmd,
|
1685 |
result.fail_reason, result.output) |
1686 |
return not result.failed and children_result |
1687 |
|
1688 |
def GetProcStatus(self): |
1689 |
"""Return device data from /proc.
|
1690 |
|
1691 |
"""
|
1692 |
if self.minor is None: |
1693 |
_ThrowError("drbd%d: GetStats() called while not attached", self._aminor) |
1694 |
proc_info = self._MassageProcData(self._GetProcData()) |
1695 |
if self.minor not in proc_info: |
1696 |
_ThrowError("drbd%d: can't find myself in /proc", self.minor) |
1697 |
return DRBD8Status(proc_info[self.minor]) |
1698 |
|
1699 |
def GetSyncStatus(self): |
1700 |
"""Returns the sync status of the device.
|
1701 |
|
1702 |
|
1703 |
If sync_percent is None, it means all is ok
|
1704 |
If estimated_time is None, it means we can't estimate
|
1705 |
the time needed, otherwise it's the time left in seconds.
|
1706 |
|
1707 |
|
1708 |
We set the is_degraded parameter to True on two conditions:
|
1709 |
network not connected or local disk missing.
|
1710 |
|
1711 |
We compute the ldisk parameter based on whether we have a local
|
1712 |
disk or not.
|
1713 |
|
1714 |
@rtype: objects.BlockDevStatus
|
1715 |
|
1716 |
"""
|
1717 |
if self.minor is None and not self.Attach(): |
1718 |
_ThrowError("drbd%d: can't Attach() in GetSyncStatus", self._aminor) |
1719 |
|
1720 |
stats = self.GetProcStatus()
|
1721 |
is_degraded = not stats.is_connected or not stats.is_disk_uptodate |
1722 |
|
1723 |
if stats.is_disk_uptodate:
|
1724 |
ldisk_status = constants.LDS_OKAY |
1725 |
elif stats.is_diskless:
|
1726 |
ldisk_status = constants.LDS_FAULTY |
1727 |
else:
|
1728 |
ldisk_status = constants.LDS_UNKNOWN |
1729 |
|
1730 |
return objects.BlockDevStatus(dev_path=self.dev_path, |
1731 |
major=self.major,
|
1732 |
minor=self.minor,
|
1733 |
sync_percent=stats.sync_percent, |
1734 |
estimated_time=stats.est_time, |
1735 |
is_degraded=is_degraded, |
1736 |
ldisk_status=ldisk_status) |
1737 |
|
1738 |
def Open(self, force=False): |
1739 |
"""Make the local state primary.
|
1740 |
|
1741 |
If the 'force' parameter is given, the '-o' option is passed to
|
1742 |
drbdsetup. Since this is a potentially dangerous operation, the
|
1743 |
force flag should be only given after creation, when it actually
|
1744 |
is mandatory.
|
1745 |
|
1746 |
"""
|
1747 |
if self.minor is None and not self.Attach(): |
1748 |
logging.error("DRBD cannot attach to a device during open")
|
1749 |
return False |
1750 |
cmd = ["drbdsetup", self.dev_path, "primary"] |
1751 |
if force:
|
1752 |
cmd.append("-o")
|
1753 |
result = utils.RunCmd(cmd) |
1754 |
if result.failed:
|
1755 |
_ThrowError("drbd%d: can't make drbd device primary: %s", self.minor, |
1756 |
result.output) |
1757 |
|
1758 |
def Close(self): |
1759 |
"""Make the local state secondary.
|
1760 |
|
1761 |
This will, of course, fail if the device is in use.
|
1762 |
|
1763 |
"""
|
1764 |
if self.minor is None and not self.Attach(): |
1765 |
_ThrowError("drbd%d: can't Attach() in Close()", self._aminor) |
1766 |
result = utils.RunCmd(["drbdsetup", self.dev_path, "secondary"]) |
1767 |
if result.failed:
|
1768 |
_ThrowError("drbd%d: can't switch drbd device to secondary: %s",
|
1769 |
self.minor, result.output)
|
1770 |
|
1771 |
def DisconnectNet(self): |
1772 |
"""Removes network configuration.
|
1773 |
|
1774 |
This method shuts down the network side of the device.
|
1775 |
|
1776 |
The method will wait up to a hardcoded timeout for the device to
|
1777 |
go into standalone after the 'disconnect' command before
|
1778 |
re-configuring it, as sometimes it takes a while for the
|
1779 |
disconnect to actually propagate and thus we might issue a 'net'
|
1780 |
command while the device is still connected. If the device is
still attached to the network when we time out, we raise an
exception.
|
1783 |
|
1784 |
"""
|
1785 |
if self.minor is None: |
1786 |
_ThrowError("drbd%d: disk not attached in re-attach net", self._aminor) |
1787 |
|
1788 |
if None in (self._lhost, self._lport, self._rhost, self._rport): |
1789 |
_ThrowError("drbd%d: DRBD disk missing network info in"
|
1790 |
" DisconnectNet()", self.minor) |
1791 |
|
1792 |
class _DisconnectStatus: |
1793 |
def __init__(self, ever_disconnected): |
1794 |
self.ever_disconnected = ever_disconnected
|
1795 |
|
1796 |
dstatus = _DisconnectStatus(_IgnoreError(self._ShutdownNet, self.minor)) |
1797 |
|
1798 |
def _WaitForDisconnect(): |
1799 |
if self.GetProcStatus().is_standalone: |
1800 |
return
|
1801 |
|
1802 |
# retry the disconnect, it seems possible that due to a well-timed
|
1803 |
# disconnect on the peer, my disconnect command might be ignored and
|
1804 |
# forgotten
|
1805 |
dstatus.ever_disconnected = \ |
1806 |
_IgnoreError(self._ShutdownNet, self.minor) or dstatus.ever_disconnected |
1807 |
|
1808 |
raise utils.RetryAgain()
|
1809 |
|
1810 |
# Keep start time
|
1811 |
start_time = time.time() |
1812 |
|
1813 |
try:
|
1814 |
# Start delay at 100 milliseconds and grow up to 2 seconds
|
1815 |
utils.Retry(_WaitForDisconnect, (0.1, 1.5, 2.0), |
1816 |
self._NET_RECONFIG_TIMEOUT)
|
1817 |
except utils.RetryTimeout:
|
1818 |
if dstatus.ever_disconnected:
|
1819 |
msg = ("drbd%d: device did not react to the"
|
1820 |
" 'disconnect' command in a timely manner")
|
1821 |
else:
|
1822 |
msg = "drbd%d: can't shutdown network, even after multiple retries"
|
1823 |
|
1824 |
_ThrowError(msg, self.minor)
|
1825 |
|
1826 |
reconfig_time = time.time() - start_time |
1827 |
if reconfig_time > (self._NET_RECONFIG_TIMEOUT * 0.25): |
1828 |
logging.info("drbd%d: DisconnectNet: detach took %.3f seconds",
|
1829 |
self.minor, reconfig_time)
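# Note on the retry above (an assumption about utils.Retry's tuple delay,
# recorded here only for readability): (0.1, 1.5, 2.0) is read as "start at
# 0.1s, multiply the wait by 1.5 per attempt, cap it at 2.0s", so the waits
# run roughly 0.1, 0.15, 0.225, ... up to 2.0 seconds until
# _NET_RECONFIG_TIMEOUT expires.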
|
1830 |
|
1831 |
def AttachNet(self, multimaster): |
1832 |
"""Reconnects the network.
|
1833 |
|
1834 |
This method connects the network side of the device with a
|
1835 |
specified multi-master flag. The device needs to be 'Standalone'
|
1836 |
but have valid network configuration data.
|
1837 |
|
1838 |
Args:
|
1839 |
- multimaster: init the network in dual-primary mode
|
1840 |
|
1841 |
"""
|
1842 |
if self.minor is None: |
1843 |
_ThrowError("drbd%d: device not attached in AttachNet", self._aminor) |
1844 |
|
1845 |
if None in (self._lhost, self._lport, self._rhost, self._rport): |
1846 |
_ThrowError("drbd%d: missing network info in AttachNet()", self.minor) |
1847 |
|
1848 |
status = self.GetProcStatus()
|
1849 |
|
1850 |
if not status.is_standalone: |
1851 |
_ThrowError("drbd%d: device is not standalone in AttachNet", self.minor) |
1852 |
|
1853 |
self._AssembleNet(self.minor, |
1854 |
(self._lhost, self._lport, self._rhost, self._rport), |
1855 |
constants.DRBD_NET_PROTOCOL, dual_pri=multimaster, |
1856 |
hmac=constants.DRBD_HMAC_ALG, secret=self._secret)
|
1857 |
|
1858 |
def Attach(self): |
1859 |
"""Check if our minor is configured.
|
1860 |
|
1861 |
This doesn't do any device configurations - it only checks if the
|
1862 |
minor is in a state different from Unconfigured.
|
1863 |
|
1864 |
Note that this function will not change the state of the system in
|
1865 |
any way (except in case of side-effects caused by reading from
|
1866 |
/proc).
|
1867 |
|
1868 |
"""
|
1869 |
used_devs = self.GetUsedDevs()
|
1870 |
if self._aminor in used_devs: |
1871 |
minor = self._aminor
|
1872 |
else:
|
1873 |
minor = None
|
1874 |
|
1875 |
self._SetFromMinor(minor)
|
1876 |
return minor is not None |
1877 |
|
1878 |
def Assemble(self): |
1879 |
"""Assemble the drbd.
|
1880 |
|
1881 |
Method:
|
1882 |
- if we have a configured device, we try to ensure that it matches
|
1883 |
our config
|
1884 |
- if not, we create it from zero
|
1885 |
- anyway, set the device parameters
|
1886 |
|
1887 |
"""
|
1888 |
super(DRBD8, self).Assemble() |
1889 |
|
1890 |
self.Attach()
|
1891 |
if self.minor is None: |
1892 |
# local device completely unconfigured
|
1893 |
self._FastAssemble()
|
1894 |
else:
|
1895 |
# we have to recheck the local and network status and try to fix
|
1896 |
# the device
|
1897 |
self._SlowAssemble()
|
1898 |
|
1899 |
sync_errors = self.SetSyncParams(self.params) |
1900 |
if sync_errors:
|
1901 |
_ThrowError("drbd%d: can't set the synchronization parameters: %s" %
|
1902 |
(self.minor, utils.CommaJoin(sync_errors)))
|
1903 |
|
1904 |
def _SlowAssemble(self): |
1905 |
"""Assembles the DRBD device from a (partially) configured device.
|
1906 |
|
1907 |
In case of partially attached (local device matches but no network
|
1908 |
setup), we perform the network attach. If successful, we re-test
|
1909 |
whether the attach can return success.
|
1910 |
|
1911 |
"""
|
1912 |
# TODO: Rewrite to not use a for loop just because there is 'break'
|
1913 |
# pylint: disable=W0631
|
1914 |
net_data = (self._lhost, self._lport, self._rhost, self._rport) |
1915 |
for minor in (self._aminor,): |
1916 |
info = self._GetDevInfo(self._GetShowData(minor)) |
1917 |
match_l = self._MatchesLocal(info)
|
1918 |
match_r = self._MatchesNet(info)
|
1919 |
|
1920 |
if match_l and match_r: |
1921 |
# everything matches
|
1922 |
break
|
1923 |
|
1924 |
if match_l and not match_r and "local_addr" not in info: |
1925 |
# disk matches, but not attached to network, attach and recheck
|
1926 |
self._AssembleNet(minor, net_data, constants.DRBD_NET_PROTOCOL,
|
1927 |
hmac=constants.DRBD_HMAC_ALG, secret=self._secret)
|
1928 |
if self._MatchesNet(self._GetDevInfo(self._GetShowData(minor))): |
1929 |
break
|
1930 |
else:
|
1931 |
_ThrowError("drbd%d: network attach successful, but 'drbdsetup"
|
1932 |
" show' disagrees", minor)
|
1933 |
|
1934 |
if match_r and "local_dev" not in info: |
1935 |
# no local disk, but network attached and it matches
|
1936 |
self._AssembleLocal(minor, self._children[0].dev_path, |
1937 |
self._children[1].dev_path, self.size) |
1938 |
if self._MatchesNet(self._GetDevInfo(self._GetShowData(minor))): |
1939 |
break
|
1940 |
else:
|
1941 |
_ThrowError("drbd%d: disk attach successful, but 'drbdsetup"
|
1942 |
" show' disagrees", minor)
|
1943 |
|
1944 |
# this case must be considered only if we actually have local
|
1945 |
# storage, i.e. not in diskless mode, because all diskless
|
1946 |
# devices are equal from the point of view of local
|
1947 |
# configuration
|
1948 |
if (match_l and "local_dev" in info and |
1949 |
not match_r and "local_addr" in info): |
1950 |
# strange case - the device network part points to somewhere
|
1951 |
# else, even though its local storage is ours; as we own the
|
1952 |
# drbd space, we try to disconnect from the remote peer and
|
1953 |
# reconnect to our correct one
|
1954 |
try:
|
1955 |
self._ShutdownNet(minor)
|
1956 |
except errors.BlockDeviceError, err:
|
1957 |
_ThrowError("drbd%d: device has correct local storage, wrong"
|
1958 |
" remote peer and is unable to disconnect in order"
|
1959 |
" to attach to the correct peer: %s", minor, str(err)) |
1960 |
# note: _AssembleNet also handles the case when we don't want
|
1961 |
# local storage (i.e. one or more of the _[lr](host|port) is
|
1962 |
# None)
|
1963 |
self._AssembleNet(minor, net_data, constants.DRBD_NET_PROTOCOL,
|
1964 |
hmac=constants.DRBD_HMAC_ALG, secret=self._secret)
|
1965 |
if self._MatchesNet(self._GetDevInfo(self._GetShowData(minor))): |
1966 |
break
|
1967 |
else:
|
1968 |
_ThrowError("drbd%d: network attach successful, but 'drbdsetup"
|
1969 |
" show' disagrees", minor)
|
1970 |
|
1971 |
else:
|
1972 |
minor = None
|
1973 |
|
1974 |
self._SetFromMinor(minor)
|
1975 |
if minor is None: |
1976 |
_ThrowError("drbd%d: cannot activate, unknown or unhandled reason",
|
1977 |
self._aminor)
|
1978 |
|
1979 |
def _FastAssemble(self): |
1980 |
"""Assemble the drbd device from zero.
|
1981 |
|
1982 |
This is run when in Assemble we detect our minor is unused.
|
1983 |
|
1984 |
"""
|
1985 |
minor = self._aminor
|
1986 |
if self._children and self._children[0] and self._children[1]: |
1987 |
self._AssembleLocal(minor, self._children[0].dev_path, |
1988 |
self._children[1].dev_path, self.size) |
1989 |
if self._lhost and self._lport and self._rhost and self._rport: |
1990 |
self._AssembleNet(minor,
|
1991 |
(self._lhost, self._lport, self._rhost, self._rport), |
1992 |
constants.DRBD_NET_PROTOCOL, |
1993 |
hmac=constants.DRBD_HMAC_ALG, secret=self._secret)
|
1994 |
self._SetFromMinor(minor)
|
1995 |
|
1996 |
  @classmethod
  def _ShutdownLocal(cls, minor):
    """Detach from the local device.

    I/Os will continue to be served from the remote device. If we
    don't have a remote device, this operation will fail.

    """
    result = utils.RunCmd(["drbdsetup", cls._DevPath(minor), "detach"])
    if result.failed:
      _ThrowError("drbd%d: can't detach local disk: %s", minor, result.output)

  @classmethod
  def _ShutdownNet(cls, minor):
    """Disconnect from the remote peer.

    This fails if we don't have a local device.

    """
    result = utils.RunCmd(["drbdsetup", cls._DevPath(minor), "disconnect"])
    if result.failed:
      _ThrowError("drbd%d: can't shutdown network: %s", minor, result.output)

  @classmethod
  def _ShutdownAll(cls, minor):
    """Deactivate the device.

    This will, of course, fail if the device is in use.

    """
    result = utils.RunCmd(["drbdsetup", cls._DevPath(minor), "down"])
    if result.failed:
      _ThrowError("drbd%d: can't shutdown drbd device: %s",
                  minor, result.output)

  def Shutdown(self):
    """Shutdown the DRBD device.

    """
    if self.minor is None and not self.Attach():
      logging.info("drbd%d: not attached during Shutdown()", self._aminor)
      return
    minor = self.minor
    self.minor = None
    self.dev_path = None
    self._ShutdownAll(minor)

  def Remove(self):
    """Stub remove for DRBD devices.

    """
    self.Shutdown()
|
2048 |
|
2049 |
@classmethod
|
2050 |
def Create(cls, unique_id, children, size, params): |
2051 |
"""Create a new DRBD8 device.
|
2052 |
|
2053 |
Since DRBD devices are not created per se, just assembled, this
|
2054 |
function only initializes the metadata.
|
2055 |
|
2056 |
"""
|
2057 |
if len(children) != 2: |
2058 |
raise errors.ProgrammerError("Invalid setup for the drbd device") |
2059 |
# check that the minor is unused
|
2060 |
aminor = unique_id[4]
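# Descriptive note (based on how the rest of this class unpacks unique_id;
# treat the exact layout as an assumption): the tuple is expected to be
# (lhost, lport, rhost, rport, aminor, secret), hence index 4 is the minor.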
|
2061 |
proc_info = cls._MassageProcData(cls._GetProcData()) |
2062 |
if aminor in proc_info: |
2063 |
status = DRBD8Status(proc_info[aminor]) |
2064 |
in_use = status.is_in_use |
2065 |
else:
|
2066 |
in_use = False
|
2067 |
if in_use:
|
2068 |
_ThrowError("drbd%d: minor is already in use at Create() time", aminor)
|
2069 |
meta = children[1]
|
2070 |
meta.Assemble() |
2071 |
if not meta.Attach(): |
2072 |
_ThrowError("drbd%d: can't attach to meta device '%s'",
|
2073 |
aminor, meta) |
2074 |
cls._CheckMetaSize(meta.dev_path) |
2075 |
cls._InitMeta(aminor, meta.dev_path) |
2076 |
return cls(unique_id, children, size, params)
|
2077 |
|
2078 |
def Grow(self, amount, dryrun, backingstore): |
2079 |
"""Resize the DRBD device and its backing storage.
|
2080 |
|
2081 |
"""
|
2082 |
if self.minor is None: |
2083 |
_ThrowError("drbd%d: Grow called while not attached", self._aminor) |
2084 |
if len(self._children) != 2 or None in self._children: |
2085 |
_ThrowError("drbd%d: cannot grow diskless device", self.minor) |
2086 |
self._children[0].Grow(amount, dryrun, backingstore) |
2087 |
if dryrun or backingstore: |
2088 |
# DRBD does not support dry-run mode and is not backing storage,
|
2089 |
# so we'll return here
|
2090 |
return
|
2091 |
result = utils.RunCmd(["drbdsetup", self.dev_path, "resize", "-s", |
2092 |
"%dm" % (self.size + amount)]) |
2093 |
if result.failed:
|
2094 |
_ThrowError("drbd%d: resize failed: %s", self.minor, result.output) |
2095 |
|
2096 |
|
2097 |
class FileStorage(BlockDev): |
2098 |
"""File device.
|
2099 |
|
2100 |
This class represents a file storage backend device.
|
2101 |
|
2102 |
The unique_id for the file device is a (file_driver, file_path) tuple.
|
2103 |
|
2104 |
"""
|
2105 |
def __init__(self, unique_id, children, size, params): |
2106 |
"""Initalizes a file device backend.
|
2107 |
|
2108 |
"""
|
2109 |
if children:
|
2110 |
raise errors.BlockDeviceError("Invalid setup for file device") |
2111 |
super(FileStorage, self).__init__(unique_id, children, size, params) |
2112 |
if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2: |
2113 |
raise ValueError("Invalid configuration data %s" % str(unique_id)) |
2114 |
self.driver = unique_id[0] |
2115 |
self.dev_path = unique_id[1] |
2116 |
self.Attach()
|
2117 |
|
2118 |
def Assemble(self): |
2119 |
"""Assemble the device.
|
2120 |
|
2121 |
Checks whether the file device exists, raises BlockDeviceError otherwise.
|
2122 |
|
2123 |
"""
|
2124 |
if not os.path.exists(self.dev_path): |
2125 |
_ThrowError("File device '%s' does not exist" % self.dev_path) |
2126 |
|
2127 |
def Shutdown(self): |
2128 |
"""Shutdown the device.
|
2129 |
|
2130 |
This is a no-op for the file type, as we don't deactivate
|
2131 |
the file on shutdown.
|
2132 |
|
2133 |
"""
|
2134 |
pass
|
2135 |
|
2136 |
def Open(self, force=False): |
2137 |
"""Make the device ready for I/O.
|
2138 |
|
2139 |
This is a no-op for the file type.
|
2140 |
|
2141 |
"""
|
2142 |
pass
|
2143 |
|
2144 |
def Close(self): |
2145 |
"""Notifies that the device will no longer be used for I/O.
|
2146 |
|
2147 |
This is a no-op for the file type.
|
2148 |
|
2149 |
"""
|
2150 |
pass
|
2151 |
|
2152 |
def Remove(self): |
2153 |
"""Remove the file backing the block device.
|
2154 |
|
2155 |
@rtype: boolean
|
2156 |
@return: True if the removal was successful
|
2157 |
|
2158 |
"""
|
2159 |
try:
|
2160 |
os.remove(self.dev_path)
|
2161 |
except OSError, err: |
2162 |
if err.errno != errno.ENOENT:
|
2163 |
_ThrowError("Can't remove file '%s': %s", self.dev_path, err) |
2164 |
|
2165 |
def Rename(self, new_id): |
2166 |
"""Renames the file.
|
2167 |
|
2168 |
"""
|
2169 |
# TODO: implement rename for file-based storage
|
2170 |
_ThrowError("Rename is not supported for file-based storage")
|
2171 |
|
2172 |
def Grow(self, amount, dryrun, backingstore): |
2173 |
"""Grow the file
|
2174 |
|
2175 |
@param amount: the amount (in mebibytes) to grow with
|
2176 |
|
2177 |
"""
|
2178 |
if not backingstore: |
2179 |
return
|
2180 |
# Check that the file exists
|
2181 |
self.Assemble()
|
2182 |
current_size = self.GetActualSize()
|
2183 |
new_size = current_size + amount * 1024 * 1024 |
2184 |
assert new_size > current_size, "Cannot Grow with a negative amount" |
2185 |
# We can't really simulate the growth
|
2186 |
if dryrun:
|
2187 |
return
|
2188 |
try:
|
2189 |
f = open(self.dev_path, "a+") |
2190 |
f.truncate(new_size) |
2191 |
f.close() |
2192 |
except EnvironmentError, err: |
2193 |
_ThrowError("Error in file growth: %", str(err)) |
2194 |
|
2195 |
def Attach(self): |
2196 |
"""Attach to an existing file.
|
2197 |
|
2198 |
Check if this file already exists.
|
2199 |
|
2200 |
@rtype: boolean
|
2201 |
@return: True if file exists
|
2202 |
|
2203 |
"""
|
2204 |
self.attached = os.path.exists(self.dev_path) |
2205 |
return self.attached |
2206 |
|
2207 |
def GetActualSize(self): |
2208 |
"""Return the actual disk size.
|
2209 |
|
2210 |
@note: the device needs to be active when this is called
|
2211 |
|
2212 |
"""
|
2213 |
assert self.attached, "BlockDevice not attached in GetActualSize()" |
2214 |
try:
|
2215 |
st = os.stat(self.dev_path)
|
2216 |
return st.st_size
|
2217 |
except OSError, err: |
2218 |
_ThrowError("Can't stat %s: %s", self.dev_path, err) |
2219 |
|
2220 |
@classmethod
|
2221 |
def Create(cls, unique_id, children, size, params): |
2222 |
"""Create a new file.
|
2223 |
|
2224 |
@param size: the size of file in MiB
|
2225 |
|
2226 |
@rtype: L{bdev.FileStorage}
|
2227 |
@return: an instance of FileStorage
|
2228 |
|
2229 |
"""
|
2230 |
if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2: |
2231 |
raise ValueError("Invalid configuration data %s" % str(unique_id)) |
2232 |
dev_path = unique_id[1]
|
2233 |
try:
|
2234 |
fd = os.open(dev_path, os.O_RDWR | os.O_CREAT | os.O_EXCL) |
2235 |
f = os.fdopen(fd, "w")
|
2236 |
f.truncate(size * 1024 * 1024) |
2237 |
f.close() |
2238 |
except EnvironmentError, err: |
2239 |
if err.errno == errno.EEXIST:
|
2240 |
_ThrowError("File already existing: %s", dev_path)
|
2241 |
_ThrowError("Error in file creation: %", str(err)) |
2242 |
|
2243 |
return FileStorage(unique_id, children, size, params)
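# Usage sketch (illustrative only; the driver constant and path below are
# hypothetical values, not taken from this module): creating a 10 GiB
# file-backed disk would look roughly like
#   FileStorage.Create((constants.FD_LOOP,
#                       "/srv/ganeti/file-storage/disk0"), [], 10240, {})
# i.e. unique_id is the (file_driver, file_path) pair described in the class
# docstring, and size is given in MiB.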
|
2244 |
|
2245 |
|
2246 |
class PersistentBlockDevice(BlockDev): |
2247 |
"""A block device with persistent node
|
2248 |
|
2249 |
May be either directly attached, or exposed through DM (e.g. dm-multipath).
|
2250 |
udev helpers are probably required to give persistent, human-friendly
|
2251 |
names.
|
2252 |
|
2253 |
For the time being, pathnames are required to lie under /dev.
|
2254 |
|
2255 |
"""
|
2256 |
def __init__(self, unique_id, children, size, params): |
2257 |
"""Attaches to a static block device.
|
2258 |
|
2259 |
The unique_id is a path under /dev.
|
2260 |
|
2261 |
"""
|
2262 |
super(PersistentBlockDevice, self).__init__(unique_id, children, size, |
2263 |
params) |
2264 |
if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2: |
2265 |
raise ValueError("Invalid configuration data %s" % str(unique_id)) |
2266 |
self.dev_path = unique_id[1] |
2267 |
if not os.path.realpath(self.dev_path).startswith("/dev/"): |
2268 |
raise ValueError("Full path '%s' lies outside /dev" % |
2269 |
os.path.realpath(self.dev_path))
|
2270 |
# TODO: this is just a safety guard checking that we only deal with devices
|
2271 |
# we know how to handle. In the future this will be integrated with
|
2272 |
# external storage backends and possible values will probably be collected
|
2273 |
# from the cluster configuration.
|
2274 |
if unique_id[0] != constants.BLOCKDEV_DRIVER_MANUAL: |
2275 |
raise ValueError("Got persistent block device of invalid type: %s" % |
2276 |
unique_id[0])
|
2277 |
|
2278 |
self.major = self.minor = None |
2279 |
self.Attach()
|
2280 |
|
2281 |
@classmethod
|
2282 |
def Create(cls, unique_id, children, size, params): |
2283 |
"""Create a new device
|
2284 |
|
2285 |
This is a noop, we only return a PersistentBlockDevice instance
|
2286 |
|
2287 |
"""
|
2288 |
return PersistentBlockDevice(unique_id, children, 0, params) |
2289 |
|
2290 |
def Remove(self): |
2291 |
"""Remove a device
|
2292 |
|
2293 |
This is a noop
|
2294 |
|
2295 |
"""
|
2296 |
pass
|
2297 |
|
2298 |
def Rename(self, new_id): |
2299 |
"""Rename this device.
|
2300 |
|
2301 |
"""
|
2302 |
_ThrowError("Rename is not supported for PersistentBlockDev storage")
|
2303 |
|
2304 |
def Attach(self): |
2305 |
"""Attach to an existing block device.
|
2306 |
|
2307 |
|
2308 |
"""
|
2309 |
self.attached = False |
2310 |
try:
|
2311 |
st = os.stat(self.dev_path)
|
2312 |
except OSError, err: |
2313 |
logging.error("Error stat()'ing %s: %s", self.dev_path, str(err)) |
2314 |
return False |
2315 |
|
2316 |
if not stat.S_ISBLK(st.st_mode): |
2317 |
logging.error("%s is not a block device", self.dev_path) |
2318 |
return False |
2319 |
|
2320 |
self.major = os.major(st.st_rdev)
|
2321 |
self.minor = os.minor(st.st_rdev)
|
2322 |
self.attached = True |
2323 |
|
2324 |
return True |
2325 |
|
2326 |
def Assemble(self): |
2327 |
"""Assemble the device.
|
2328 |
|
2329 |
"""
|
2330 |
pass
|
2331 |
|
2332 |
def Shutdown(self): |
2333 |
"""Shutdown the device.
|
2334 |
|
2335 |
"""
|
2336 |
pass
|
2337 |
|
2338 |
def Open(self, force=False): |
2339 |
"""Make the device ready for I/O.
|
2340 |
|
2341 |
"""
|
2342 |
pass
|
2343 |
|
2344 |
def Close(self): |
2345 |
"""Notifies that the device will no longer be used for I/O.
|
2346 |
|
2347 |
"""
|
2348 |
pass
|
2349 |
|
2350 |
def Grow(self, amount, dryrun, backingstore): |
2351 |
"""Grow the logical volume.
|
2352 |
|
2353 |
"""
|
2354 |
_ThrowError("Grow is not supported for PersistentBlockDev storage")
|
2355 |
|
2356 |
|
2357 |
class RADOSBlockDevice(BlockDev): |
2358 |
"""A RADOS Block Device (rbd).
|
2359 |
|
2360 |
This class implements the RADOS Block Device for the backend. You need
|
2361 |
the rbd kernel driver, the RADOS Tools and a working RADOS cluster for
|
2362 |
this to be functional.
|
2363 |
|
2364 |
"""
|
2365 |
def __init__(self, unique_id, children, size, params): |
2366 |
"""Attaches to an rbd device.
|
2367 |
|
2368 |
"""
|
2369 |
super(RADOSBlockDevice, self).__init__(unique_id, children, size, params) |
2370 |
if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2: |
2371 |
raise ValueError("Invalid configuration data %s" % str(unique_id)) |
2372 |
|
2373 |
self.driver, self.rbd_name = unique_id |
2374 |
|
2375 |
self.major = self.minor = None |
2376 |
self.Attach()
|
2377 |
|
2378 |
@classmethod
|
2379 |
def Create(cls, unique_id, children, size, params): |
2380 |
"""Create a new rbd device.
|
2381 |
|
2382 |
Provision a new rbd volume inside a RADOS pool.
|
2383 |
|
2384 |
"""
|
2385 |
if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2: |
2386 |
raise errors.ProgrammerError("Invalid configuration data %s" % |
2387 |
str(unique_id))
|
2388 |
rbd_pool = params[constants.LDP_POOL] |
2389 |
rbd_name = unique_id[1]
|
2390 |
|
2391 |
# Provision a new rbd volume (Image) inside the RADOS cluster.
|
2392 |
cmd = [constants.RBD_CMD, "create", "-p", rbd_pool, |
2393 |
rbd_name, "--size", "%s" % size] |
2394 |
result = utils.RunCmd(cmd) |
2395 |
if result.failed:
|
2396 |
_ThrowError("rbd creation failed (%s): %s",
|
2397 |
result.fail_reason, result.output) |
2398 |
|
2399 |
return RADOSBlockDevice(unique_id, children, size, params)
|
2400 |
|
2401 |
def Remove(self): |
2402 |
"""Remove the rbd device.
|
2403 |
|
2404 |
"""
|
2405 |
rbd_pool = self.params[constants.LDP_POOL]
|
2406 |
rbd_name = self.unique_id[1] |
2407 |
|
2408 |
if not self.minor and not self.Attach(): |
2409 |
# The rbd device doesn't exist.
|
2410 |
return
|
2411 |
|
2412 |
# First shutdown the device (remove mappings).
|
2413 |
self.Shutdown()
|
2414 |
|
2415 |
# Remove the actual Volume (Image) from the RADOS cluster.
|
2416 |
cmd = [constants.RBD_CMD, "rm", "-p", rbd_pool, rbd_name] |
2417 |
result = utils.RunCmd(cmd) |
2418 |
if result.failed:
|
2419 |
_ThrowError("Can't remove Volume from cluster with rbd rm: %s - %s",
|
2420 |
result.fail_reason, result.output) |
2421 |
|
2422 |
def Rename(self, new_id): |
2423 |
"""Rename this device.
|
2424 |
|
2425 |
"""
|
2426 |
pass
|
2427 |
|
2428 |
def Attach(self): |
2429 |
"""Attach to an existing rbd device.
|
2430 |
|
2431 |
This method maps the rbd volume that matches our name with
|
2432 |
an rbd device and then attaches to this device.
|
2433 |
|
2434 |
"""
|
2435 |
self.attached = False |
2436 |
|
2437 |
# Map the rbd volume to a block device under /dev
|
2438 |
self.dev_path = self._MapVolumeToBlockdev(self.unique_id) |
2439 |
|
2440 |
try:
|
2441 |
st = os.stat(self.dev_path)
|
2442 |
except OSError, err: |
2443 |
logging.error("Error stat()'ing %s: %s", self.dev_path, str(err)) |
2444 |
return False |
2445 |
|
2446 |
if not stat.S_ISBLK(st.st_mode): |
2447 |
logging.error("%s is not a block device", self.dev_path) |
2448 |
return False |
2449 |
|
2450 |
self.major = os.major(st.st_rdev)
|
2451 |
self.minor = os.minor(st.st_rdev)
|
2452 |
self.attached = True |
2453 |
|
2454 |
return True |
2455 |
|
2456 |
def _MapVolumeToBlockdev(self, unique_id): |
2457 |
"""Maps existing rbd volumes to block devices.
|
2458 |
|
2459 |
This method should be idempotent if the mapping already exists.
|
2460 |
|
2461 |
@rtype: string
|
2462 |
@return: the block device path that corresponds to the volume
|
2463 |
|
2464 |
"""
|
2465 |
pool = self.params[constants.LDP_POOL]
|
2466 |
name = unique_id[1]
|
2467 |
|
2468 |
# Check if the mapping already exists.
|
2469 |
showmap_cmd = [constants.RBD_CMD, "showmapped", "-p", pool] |
2470 |
result = utils.RunCmd(showmap_cmd) |
2471 |
if result.failed:
|
2472 |
_ThrowError("rbd showmapped failed (%s): %s",
|
2473 |
result.fail_reason, result.output) |
2474 |
|
2475 |
rbd_dev = self._ParseRbdShowmappedOutput(result.output, name)
|
2476 |
|
2477 |
if rbd_dev:
|
2478 |
# The mapping exists. Return it.
|
2479 |
return rbd_dev
|
2480 |
|
2481 |
# The mapping doesn't exist. Create it.
|
2482 |
map_cmd = [constants.RBD_CMD, "map", "-p", pool, name] |
2483 |
result = utils.RunCmd(map_cmd) |
2484 |
if result.failed:
|
2485 |
_ThrowError("rbd map failed (%s): %s",
|
2486 |
result.fail_reason, result.output) |
2487 |
|
2488 |
# Find the corresponding rbd device.
|
2489 |
showmap_cmd = [constants.RBD_CMD, "showmapped", "-p", pool] |
2490 |
result = utils.RunCmd(showmap_cmd) |
2491 |
if result.failed:
|
2492 |
_ThrowError("rbd map succeeded, but showmapped failed (%s): %s",
|
2493 |
result.fail_reason, result.output) |
2494 |
|
2495 |
rbd_dev = self._ParseRbdShowmappedOutput(result.output, name)
|
2496 |
|
2497 |
if not rbd_dev: |
2498 |
_ThrowError("rbd map succeeded, but could not find the rbd block"
|
2499 |
" device in output of showmapped, for volume: %s", name)
|
2500 |
|
2501 |
# The device was successfully mapped. Return it.
|
2502 |
return rbd_dev
|
2503 |
|
2504 |
@staticmethod
|
2505 |
def _ParseRbdShowmappedOutput(output, volume_name): |
2506 |
"""Parse the output of `rbd showmapped'.
|
2507 |
|
2508 |
This method parses the output of `rbd showmapped' and returns
|
2509 |
the rbd block device path (e.g. /dev/rbd0) that matches the
|
2510 |
given rbd volume.
|
2511 |
|
2512 |
@type output: string
|
2513 |
@param output: the whole output of `rbd showmapped'
|
2514 |
@type volume_name: string
|
2515 |
@param volume_name: the name of the volume whose device we search for
|
2516 |
@rtype: string or None
|
2517 |
@return: block device path if the volume is mapped, else None
|
2518 |
|
2519 |
"""
|
2520 |
allfields = 5
|
2521 |
volumefield = 2
|
2522 |
devicefield = 4
|
2523 |
|
2524 |
field_sep = "\t"
|
2525 |
|
2526 |
lines = output.splitlines() |
2527 |
splitted_lines = map(lambda l: l.split(field_sep), lines) |
2528 |
|
2529 |
# Check empty output.
|
2530 |
if not splitted_lines: |
2531 |
_ThrowError("rbd showmapped returned empty output")
|
2532 |
|
2533 |
# Check showmapped header line, to determine number of fields.
|
2534 |
field_cnt = len(splitted_lines[0]) |
2535 |
if field_cnt != allfields:
|
2536 |
_ThrowError("Cannot parse rbd showmapped output because its format"
|
2537 |
" seems to have changed; expected %s fields, found %s",
|
2538 |
allfields, field_cnt) |
2539 |
|
2540 |
matched_lines = \ |
2541 |
filter(lambda l: len(l) == allfields and l[volumefield] == volume_name, |
2542 |
splitted_lines) |
2543 |
|
2544 |
if len(matched_lines) > 1: |
2545 |
_ThrowError("The rbd volume %s is mapped more than once."
|
2546 |
" This shouldn't happen, try to unmap the extra"
|
2547 |
" devices manually.", volume_name)
|
2548 |
|
2549 |
if matched_lines:
|
2550 |
# rbd block device found. Return it.
|
2551 |
rbd_dev = matched_lines[0][devicefield]
|
2552 |
return rbd_dev
|
2553 |
|
2554 |
# The given volume is not mapped.
|
2555 |
return None |
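# Illustrative sketch (an assumption about the external tool's output format,
# kept here only as documentation): the parser above expects `rbd showmapped'
# to print five tab-separated columns, header included, e.g.
#   id<TAB>pool<TAB>image<TAB>snap<TAB>device
#   0<TAB>rbd<TAB>myvolume<TAB>-<TAB>/dev/rbd0
# in which case a volume_name of "myvolume" resolves to "/dev/rbd0".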
2556 |
|
2557 |
def Assemble(self): |
2558 |
"""Assemble the device.
|
2559 |
|
2560 |
"""
|
2561 |
pass
|
2562 |
|
2563 |
def Shutdown(self): |
2564 |
"""Shutdown the device.
|
2565 |
|
2566 |
"""
|
2567 |
if not self.minor and not self.Attach(): |
2568 |
# The rbd device doesn't exist.
|
2569 |
return
|
2570 |
|
2571 |
# Unmap the block device from the Volume.
|
2572 |
self._UnmapVolumeFromBlockdev(self.unique_id) |
2573 |
|
2574 |
self.minor = None |
2575 |
self.dev_path = None |
2576 |
|
2577 |
def _UnmapVolumeFromBlockdev(self, unique_id): |
2578 |
"""Unmaps the rbd device from the Volume it is mapped.
|
2579 |
|
2580 |
Unmaps the rbd device from the Volume it was previously mapped to.
|
2581 |
This method should be idempotent if the Volume isn't mapped.
|
2582 |
|
2583 |
"""
|
2584 |
pool = self.params[constants.LDP_POOL]
|
2585 |
name = unique_id[1]
|
2586 |
|
2587 |
# Check if the mapping already exists.
|
2588 |
showmap_cmd = [constants.RBD_CMD, "showmapped", "-p", pool] |
2589 |
result = utils.RunCmd(showmap_cmd) |
2590 |
if result.failed:
|
2591 |
_ThrowError("rbd showmapped failed [during unmap](%s): %s",
|
2592 |
result.fail_reason, result.output) |
2593 |
|
2594 |
rbd_dev = self._ParseRbdShowmappedOutput(result.output, name)
|
2595 |
|
2596 |
if rbd_dev:
|
2597 |
# The mapping exists. Unmap the rbd device.
|
2598 |
unmap_cmd = [constants.RBD_CMD, "unmap", "%s" % rbd_dev] |
2599 |
result = utils.RunCmd(unmap_cmd) |
2600 |
if result.failed:
|
2601 |
_ThrowError("rbd unmap failed (%s): %s",
|
2602 |
result.fail_reason, result.output) |
2603 |
|
2604 |
def Open(self, force=False): |
2605 |
"""Make the device ready for I/O.
|
2606 |
|
2607 |
"""
|
2608 |
pass
|
2609 |
|
2610 |
def Close(self): |
2611 |
"""Notifies that the device will no longer be used for I/O.
|
2612 |
|
2613 |
"""
|
2614 |
pass
|
2615 |
|
2616 |
def Grow(self, amount, dryrun, backingstore): |
2617 |
"""Grow the Volume.
|
2618 |
|
2619 |
@type amount: integer
|
2620 |
@param amount: the amount (in mebibytes) to grow with
|
2621 |
@type dryrun: boolean
|
2622 |
@param dryrun: whether to execute the operation in simulation mode
|
2623 |
only, without actually increasing the size
|
2624 |
|
2625 |
"""
|
2626 |
if not backingstore: |
2627 |
return
|
2628 |
if not self.Attach(): |
2629 |
_ThrowError("Can't attach to rbd device during Grow()")
|
2630 |
|
2631 |
if dryrun:
|
2632 |
# the rbd tool does not support dry runs of resize operations.
|
2633 |
# Since rbd volumes are thinly provisioned, we assume
|
2634 |
# there is always enough free space for the operation.
|
2635 |
return
|
2636 |
|
2637 |
rbd_pool = self.params[constants.LDP_POOL]
|
2638 |
rbd_name = self.unique_id[1] |
2639 |
new_size = self.size + amount
|
2640 |
|
2641 |
# Resize the rbd volume (Image) inside the RADOS cluster.
|
2642 |
cmd = [constants.RBD_CMD, "resize", "-p", rbd_pool, |
2643 |
rbd_name, "--size", "%s" % new_size] |
2644 |
result = utils.RunCmd(cmd) |
2645 |
if result.failed:
|
2646 |
_ThrowError("rbd resize failed (%s): %s",
|
2647 |
result.fail_reason, result.output) |


DEV_MAP = {
  constants.LD_LV: LogicalVolume,
  constants.LD_DRBD8: DRBD8,
  constants.LD_BLOCKDEV: PersistentBlockDevice,
  constants.LD_RBD: RADOSBlockDevice,
  }

if constants.ENABLE_FILE_STORAGE or constants.ENABLE_SHARED_FILE_STORAGE:
  DEV_MAP[constants.LD_FILE] = FileStorage


def _VerifyDiskType(dev_type):
  if dev_type not in DEV_MAP:
    raise errors.ProgrammerError("Invalid block device type '%s'" % dev_type)


def _VerifyDiskParams(disk):
  """Verifies if all disk parameters are set.

  """
  missing = set(constants.DISK_LD_DEFAULTS[disk.dev_type]) - set(disk.params)
  if missing:
    raise errors.ProgrammerError("Block device is missing disk parameters: %s" %
                                 missing)


def FindDevice(disk, children):
  """Search for an existing, assembled device.

  This will succeed only if the device exists and is assembled, but it
  does not do any actions in order to activate the device.

  @type disk: L{objects.Disk}
  @param disk: the disk object to find
  @type children: list of L{bdev.BlockDev}
  @param children: the list of block devices that are children of the device
                   represented by the disk parameter

  """
  _VerifyDiskType(disk.dev_type)
  device = DEV_MAP[disk.dev_type](disk.physical_id, children, disk.size,
                                  disk.params)
  if not device.attached:
    return None
  return device


def Assemble(disk, children):
  """Try to attach or assemble an existing device.

  This will attach or assemble the device, as needed, to bring it
  fully up. It must be safe to run on already-assembled devices.

  @type disk: L{objects.Disk}
  @param disk: the disk object to assemble
  @type children: list of L{bdev.BlockDev}
  @param children: the list of block devices that are children of the device
                   represented by the disk parameter

  """
  _VerifyDiskType(disk.dev_type)
  _VerifyDiskParams(disk)
  device = DEV_MAP[disk.dev_type](disk.physical_id, children, disk.size,
                                  disk.params)
  device.Assemble()
  return device


def Create(disk, children):
  """Create a device.

  @type disk: L{objects.Disk}
  @param disk: the disk object to create
  @type children: list of L{bdev.BlockDev}
  @param children: the list of block devices that are children of the device
                   represented by the disk parameter

  """
  _VerifyDiskType(disk.dev_type)
  _VerifyDiskParams(disk)
  device = DEV_MAP[disk.dev_type].Create(disk.physical_id, children, disk.size,
                                         disk.params)
  return device
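# Usage sketch (illustrative, not part of the module): callers normally go
# through the dispatcher functions above instead of instantiating the device
# classes directly. For an objects.Disk instance "disk" whose children have
# already been assembled:
#   dev = FindDevice(disk, children)   # attach only, never changes state
#   if dev is None:
#     dev = Assemble(disk, children)   # attach and activate as needed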