/*
 * QEMU Enhanced Disk Format
 *
 * Copyright IBM, Corp. 2010
 *
 * Authors:
 *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu-timer.h"
#include "trace.h"
#include "qed.h"
#include "qerror.h"

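/**
 * Cancel a request by waiting for it to complete
 *
 * qed_aio_complete_bh() sets *acb->finished once the completion callback
 * has run, so spinning in qemu_aio_wait() is sufficient.
 */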
static void qed_aio_cancel(BlockDriverAIOCB *blockacb)
{
    QEDAIOCB *acb = (QEDAIOCB *)blockacb;
    bool finished = false;

    /* Wait for the request to finish */
    acb->finished = &finished;
    while (!finished) {
        qemu_aio_wait();
    }
}

static AIOPool qed_aio_pool = {
    .aiocb_size         = sizeof(QEDAIOCB),
    .cancel             = qed_aio_cancel,
};

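/**
 * Check whether a buffer looks like a QED image header
 *
 * Returns a probe score: 100 if the QED magic matches, 0 otherwise.
 */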
static int bdrv_qed_probe(const uint8_t *buf, int buf_size,
                          const char *filename)
{
    const QEDHeader *header = (const QEDHeader *)buf;

    if (buf_size < sizeof(*header)) {
        return 0;
    }
    if (le32_to_cpu(header->magic) != QED_MAGIC) {
        return 0;
    }
    return 100;
}

/**
 * Check whether an image format is raw
 *
 * @fmt:    Backing file format, may be NULL
 */
static bool qed_fmt_is_raw(const char *fmt)
{
    return fmt && strcmp(fmt, "raw") == 0;
}

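/* The on-disk header is little-endian; these helpers convert between disk
 * and CPU byte order.
 */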
static void qed_header_le_to_cpu(const QEDHeader *le, QEDHeader *cpu)
{
    cpu->magic = le32_to_cpu(le->magic);
    cpu->cluster_size = le32_to_cpu(le->cluster_size);
    cpu->table_size = le32_to_cpu(le->table_size);
    cpu->header_size = le32_to_cpu(le->header_size);
    cpu->features = le64_to_cpu(le->features);
    cpu->compat_features = le64_to_cpu(le->compat_features);
    cpu->autoclear_features = le64_to_cpu(le->autoclear_features);
    cpu->l1_table_offset = le64_to_cpu(le->l1_table_offset);
    cpu->image_size = le64_to_cpu(le->image_size);
    cpu->backing_filename_offset = le32_to_cpu(le->backing_filename_offset);
    cpu->backing_filename_size = le32_to_cpu(le->backing_filename_size);
}

static void qed_header_cpu_to_le(const QEDHeader *cpu, QEDHeader *le)
{
    le->magic = cpu_to_le32(cpu->magic);
    le->cluster_size = cpu_to_le32(cpu->cluster_size);
    le->table_size = cpu_to_le32(cpu->table_size);
    le->header_size = cpu_to_le32(cpu->header_size);
    le->features = cpu_to_le64(cpu->features);
    le->compat_features = cpu_to_le64(cpu->compat_features);
    le->autoclear_features = cpu_to_le64(cpu->autoclear_features);
    le->l1_table_offset = cpu_to_le64(cpu->l1_table_offset);
    le->image_size = cpu_to_le64(cpu->image_size);
    le->backing_filename_offset = cpu_to_le32(cpu->backing_filename_offset);
    le->backing_filename_size = cpu_to_le32(cpu->backing_filename_size);
}

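/**
 * Write the header out synchronously in on-disk (little-endian) form
 */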
static int qed_write_header_sync(BDRVQEDState *s)
{
    QEDHeader le;
    int ret;

    qed_header_cpu_to_le(&s->header, &le);
    ret = bdrv_pwrite(s->bs->file, 0, &le, sizeof(le));
    if (ret != sizeof(le)) {
        return ret;
    }
    return 0;
}

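/* State for qed_write_header()'s asynchronous read-modify-write cycle */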
typedef struct {
    GenericCB gencb;
    BDRVQEDState *s;
    struct iovec iov;
    QEMUIOVector qiov;
    int nsectors;
    uint8_t *buf;
} QEDWriteHeaderCB;

static void qed_write_header_cb(void *opaque, int ret)
{
    QEDWriteHeaderCB *write_header_cb = opaque;

    qemu_vfree(write_header_cb->buf);
    gencb_complete(write_header_cb, ret);
}

static void qed_write_header_read_cb(void *opaque, int ret)
{
    QEDWriteHeaderCB *write_header_cb = opaque;
    BDRVQEDState *s = write_header_cb->s;
    BlockDriverAIOCB *acb;

    if (ret) {
        qed_write_header_cb(write_header_cb, ret);
        return;
    }

    /* Update header */
    qed_header_cpu_to_le(&s->header, (QEDHeader *)write_header_cb->buf);

    acb = bdrv_aio_writev(s->bs->file, 0, &write_header_cb->qiov,
                          write_header_cb->nsectors, qed_write_header_cb,
                          write_header_cb);
    if (!acb) {
        qed_write_header_cb(write_header_cb, -EIO);
    }
}

/**
 * Update header in-place (does not rewrite backing filename or other strings)
 *
 * This function only updates known header fields in-place and does not affect
 * extra data after the QED header.
 */
static void qed_write_header(BDRVQEDState *s, BlockDriverCompletionFunc cb,
                             void *opaque)
{
    /* We must write full sectors for O_DIRECT but cannot necessarily generate
     * the data following the header if an unrecognized compat feature is
     * active.  Therefore, first read the sectors containing the header, update
     * them, and write back.
     */

    BlockDriverAIOCB *acb;
    int nsectors = (sizeof(QEDHeader) + BDRV_SECTOR_SIZE - 1) /
                   BDRV_SECTOR_SIZE;
    size_t len = nsectors * BDRV_SECTOR_SIZE;
    QEDWriteHeaderCB *write_header_cb = gencb_alloc(sizeof(*write_header_cb),
                                                    cb, opaque);

    write_header_cb->s = s;
    write_header_cb->nsectors = nsectors;
    write_header_cb->buf = qemu_blockalign(s->bs, len);
    write_header_cb->iov.iov_base = write_header_cb->buf;
    write_header_cb->iov.iov_len = len;
    qemu_iovec_init_external(&write_header_cb->qiov, &write_header_cb->iov, 1);

    acb = bdrv_aio_readv(s->bs->file, 0, &write_header_cb->qiov, nsectors,
                         qed_write_header_read_cb, write_header_cb);
    if (!acb) {
        qed_write_header_cb(write_header_cb, -EIO);
    }
}

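/**
 * Maximum image size for a given table geometry
 *
 * For example, with 64 KB clusters and a table_size of 4, each table holds
 * (4 * 65536) / 8 = 32768 entries, one L2 table maps 32768 * 64 KB = 2 GB,
 * and a full L1 table maps 32768 * 2 GB = 64 TB.
 */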
static uint64_t qed_max_image_size(uint32_t cluster_size, uint32_t table_size)
{
    uint64_t table_entries;
    uint64_t l2_size;

    table_entries = (table_size * cluster_size) / sizeof(uint64_t);
    l2_size = table_entries * cluster_size;

    return l2_size * table_entries;
}

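/* Cluster and table sizes must be powers of two within the supported range */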
static bool qed_is_cluster_size_valid(uint32_t cluster_size)
{
    if (cluster_size < QED_MIN_CLUSTER_SIZE ||
        cluster_size > QED_MAX_CLUSTER_SIZE) {
        return false;
    }
    if (cluster_size & (cluster_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}

static bool qed_is_table_size_valid(uint32_t table_size)
{
    if (table_size < QED_MIN_TABLE_SIZE ||
        table_size > QED_MAX_TABLE_SIZE) {
        return false;
    }
    if (table_size & (table_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}

static bool qed_is_image_size_valid(uint64_t image_size, uint32_t cluster_size,
                                    uint32_t table_size)
{
    if (image_size % BDRV_SECTOR_SIZE != 0) {
        return false; /* not multiple of sector size */
    }
    if (image_size > qed_max_image_size(cluster_size, table_size)) {
        return false; /* image is too large */
    }
    return true;
}

/**
 * Read a string of known length from the image file
 *
 * @file:       Image file
 * @offset:     File offset to start of string, in bytes
 * @n:          String length in bytes
 * @buf:        Destination buffer
 * @buflen:     Destination buffer length in bytes
 * @ret:        0 on success, -errno on failure
 *
 * The string is NUL-terminated.
 */
static int qed_read_string(BlockDriverState *file, uint64_t offset, size_t n,
                           char *buf, size_t buflen)
{
    int ret;
    if (n >= buflen) {
        return -EINVAL;
    }
    ret = bdrv_pread(file, offset, buf, n);
    if (ret < 0) {
        return ret;
    }
    buf[n] = '\0';
    return 0;
}

/**
 * Allocate new clusters
 *
 * @s:          QED state
 * @n:          Number of contiguous clusters to allocate
 * @ret:        Offset of first allocated cluster
 *
 * This function only produces the offset where the new clusters should be
 * written.  It updates BDRVQEDState but does not make any changes to the image
 * file.
 */
static uint64_t qed_alloc_clusters(BDRVQEDState *s, unsigned int n)
{
    uint64_t offset = s->file_size;
    s->file_size += n * s->header.cluster_size;
    return offset;
}

QEDTable *qed_alloc_table(BDRVQEDState *s)
{
    /* Honor O_DIRECT memory alignment requirements */
    return qemu_blockalign(s->bs,
                           s->header.cluster_size * s->header.table_size);
}

/**
 * Allocate a new zeroed L2 table
 */
static CachedL2Table *qed_new_l2_table(BDRVQEDState *s)
{
    CachedL2Table *l2_table = qed_alloc_l2_cache_entry(&s->l2_cache);

    l2_table->table = qed_alloc_table(s);
    l2_table->offset = qed_alloc_clusters(s, s->header.table_size);

    memset(l2_table->table->offsets, 0,
           s->header.cluster_size * s->header.table_size);
    return l2_table;
}

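/*
 * Allocating write requests are serialized.  "Plugging" holds back queued
 * allocating writes (used while QED_F_NEED_CHECK is being cleared);
 * unplugging restarts the first queued request.
 */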
static void qed_aio_next_io(void *opaque, int ret);

static void qed_plug_allocating_write_reqs(BDRVQEDState *s)
{
    assert(!s->allocating_write_reqs_plugged);

    s->allocating_write_reqs_plugged = true;
}

static void qed_unplug_allocating_write_reqs(BDRVQEDState *s)
{
    QEDAIOCB *acb;

    assert(s->allocating_write_reqs_plugged);

    s->allocating_write_reqs_plugged = false;

    acb = QSIMPLEQ_FIRST(&s->allocating_write_reqs);
    if (acb) {
        qed_aio_next_io(acb, 0);
    }
}

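/*
 * Clearing QED_F_NEED_CHECK proceeds in stages once allocating writes have
 * drained: plug the queue, flush, rewrite the header, flush again, then
 * unplug.  The callbacks below form that chain, starting from
 * qed_need_check_timer_cb().
 */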
static void qed_finish_clear_need_check(void *opaque, int ret)
{
    /* Do nothing */
}

static void qed_flush_after_clear_need_check(void *opaque, int ret)
{
    BDRVQEDState *s = opaque;

    bdrv_aio_flush(s->bs, qed_finish_clear_need_check, s);

    /* No need to wait until flush completes */
    qed_unplug_allocating_write_reqs(s);
}

static void qed_clear_need_check(void *opaque, int ret)
{
    BDRVQEDState *s = opaque;

    if (ret) {
        qed_unplug_allocating_write_reqs(s);
        return;
    }

    s->header.features &= ~QED_F_NEED_CHECK;
    qed_write_header(s, qed_flush_after_clear_need_check, s);
}

static void qed_need_check_timer_cb(void *opaque)
{
    BDRVQEDState *s = opaque;

    /* The timer should only fire when allocating writes have drained */
    assert(!QSIMPLEQ_FIRST(&s->allocating_write_reqs));

    trace_qed_need_check_timer_cb(s);

    qed_plug_allocating_write_reqs(s);

    /* Ensure writes are on disk before clearing flag */
    bdrv_aio_flush(s->bs, qed_clear_need_check, s);
}

static void qed_start_need_check_timer(BDRVQEDState *s)
{
    trace_qed_start_need_check_timer(s);

    /* Use vm_clock so we don't alter the image file while suspended for
     * migration.
     */
    qemu_mod_timer(s->need_check_timer, qemu_get_clock_ns(vm_clock) +
                   get_ticks_per_sec() * QED_NEED_CHECK_TIMEOUT);
}

/* It's okay to call this multiple times or when no timer is started */
static void qed_cancel_need_check_timer(BDRVQEDState *s)
{
    trace_qed_cancel_need_check_timer(s);
    qemu_del_timer(s->need_check_timer);
}

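/**
 * Open the image: validate the header, reject unknown feature bits, knock
 * out unknown autoclear bits, load the L1 table, and run a consistency
 * check if the image was not cleanly shut down
 */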
static int bdrv_qed_open(BlockDriverState *bs, int flags)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader le_header;
    int64_t file_size;
    int ret;

    s->bs = bs;
    QSIMPLEQ_INIT(&s->allocating_write_reqs);

    ret = bdrv_pread(bs->file, 0, &le_header, sizeof(le_header));
    if (ret < 0) {
        return ret;
    }
    ret = 0; /* ret should always be 0 or -errno */
    qed_header_le_to_cpu(&le_header, &s->header);

    if (s->header.magic != QED_MAGIC) {
        return -EINVAL;
    }
    if (s->header.features & ~QED_FEATURE_MASK) {
        /* image uses unsupported feature bits */
        char buf[64];
        snprintf(buf, sizeof(buf), "%" PRIx64,
                 s->header.features & ~QED_FEATURE_MASK);
        qerror_report(QERR_UNKNOWN_BLOCK_FORMAT_FEATURE,
                      bs->device_name, "QED", buf);
        return -ENOTSUP;
    }
    if (!qed_is_cluster_size_valid(s->header.cluster_size)) {
        return -EINVAL;
    }

    /* Round down file size to the last cluster */
    file_size = bdrv_getlength(bs->file);
    if (file_size < 0) {
        return file_size;
    }
    s->file_size = qed_start_of_cluster(s, file_size);

    if (!qed_is_table_size_valid(s->header.table_size)) {
        return -EINVAL;
    }
    if (!qed_is_image_size_valid(s->header.image_size,
                                 s->header.cluster_size,
                                 s->header.table_size)) {
        return -EINVAL;
    }
    if (!qed_check_table_offset(s, s->header.l1_table_offset)) {
        return -EINVAL;
    }

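    /*
     * Example with 64 KB clusters and table_size 4: table_nelems =
     * (65536 * 4) / 8 = 32768, l2_shift = 16, l2_mask = 0x7fff, and
     * l1_shift = 31.  A byte position then effectively splits into an L1
     * index (pos >> l1_shift), an L2 index ((pos >> l2_shift) & l2_mask),
     * and an offset into the cluster (the low l2_shift bits).
     */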
    s->table_nelems = (s->header.cluster_size * s->header.table_size) /
                      sizeof(uint64_t);
    s->l2_shift = ffs(s->header.cluster_size) - 1;
    s->l2_mask = s->table_nelems - 1;
    s->l1_shift = s->l2_shift + ffs(s->table_nelems) - 1;

    if ((s->header.features & QED_F_BACKING_FILE)) {
        if ((uint64_t)s->header.backing_filename_offset +
            s->header.backing_filename_size >
            s->header.cluster_size * s->header.header_size) {
            return -EINVAL;
        }

        ret = qed_read_string(bs->file, s->header.backing_filename_offset,
                              s->header.backing_filename_size, bs->backing_file,
                              sizeof(bs->backing_file));
        if (ret < 0) {
            return ret;
        }

        if (s->header.features & QED_F_BACKING_FORMAT_NO_PROBE) {
            pstrcpy(bs->backing_format, sizeof(bs->backing_format), "raw");
        }
    }

    /* Reset unknown autoclear feature bits.  This is a backwards
     * compatibility mechanism that allows images to be opened by older
     * programs, which "knock out" unknown feature bits.  When an image is
     * opened by a newer program again it can detect that the autoclear
     * feature is no longer valid.
     */
    if ((s->header.autoclear_features & ~QED_AUTOCLEAR_FEATURE_MASK) != 0 &&
        !bdrv_is_read_only(bs->file)) {
        s->header.autoclear_features &= QED_AUTOCLEAR_FEATURE_MASK;

        ret = qed_write_header_sync(s);
        if (ret) {
            return ret;
        }

        /* From here on only known autoclear feature bits are valid */
        bdrv_flush(bs->file);
    }

    s->l1_table = qed_alloc_table(s);
    qed_init_l2_cache(&s->l2_cache);

    ret = qed_read_l1_table_sync(s);
    if (ret) {
        goto out;
    }

    /* If image was not closed cleanly, check consistency */
    if (s->header.features & QED_F_NEED_CHECK) {
        /* Read-only images cannot be fixed.  There is no risk of corruption
         * since write operations are not possible.  Therefore, allow
         * potentially inconsistent images to be opened read-only.  This can
         * aid data recovery from an otherwise inconsistent image.
         */
        if (!bdrv_is_read_only(bs->file)) {
            BdrvCheckResult result = {0};

            ret = qed_check(s, &result, true);
            if (ret) {
                goto out;
            }
            if (!result.corruptions && !result.check_errors) {
                /* Ensure fixes reach storage before clearing check bit */
                bdrv_flush(s->bs);

                s->header.features &= ~QED_F_NEED_CHECK;
                qed_write_header_sync(s);
            }
        }
    }

    s->need_check_timer = qemu_new_timer_ns(vm_clock,
                                            qed_need_check_timer_cb, s);

out:
    if (ret) {
        qed_free_l2_cache(&s->l2_cache);
        qemu_vfree(s->l1_table);
    }
    return ret;
}

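/**
 * Flush and mark the image clean on shutdown so the next open can skip the
 * consistency check
 */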
static void bdrv_qed_close(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    qed_cancel_need_check_timer(s);
    qemu_free_timer(s->need_check_timer);

    /* Ensure writes reach stable storage */
    bdrv_flush(bs->file);

    /* Clean shutdown, no check required on next open */
    if (s->header.features & QED_F_NEED_CHECK) {
        s->header.features &= ~QED_F_NEED_CHECK;
        qed_write_header_sync(s);
    }

    qed_free_l2_cache(&s->l2_cache);
    qemu_vfree(s->l1_table);
}

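/**
 * Create a new image file: header, optional backing filename string, and a
 * zeroed L1 table
 */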
static int qed_create(const char *filename, uint32_t cluster_size,
                      uint64_t image_size, uint32_t table_size,
                      const char *backing_file, const char *backing_fmt)
{
    QEDHeader header = {
        .magic = QED_MAGIC,
        .cluster_size = cluster_size,
        .table_size = table_size,
        .header_size = 1,
        .features = 0,
        .compat_features = 0,
        .l1_table_offset = cluster_size,
        .image_size = image_size,
    };
    QEDHeader le_header;
    uint8_t *l1_table = NULL;
    size_t l1_size = header.cluster_size * header.table_size;
    int ret = 0;
    BlockDriverState *bs = NULL;

    ret = bdrv_create_file(filename, NULL);
    if (ret < 0) {
        return ret;
    }

    ret = bdrv_file_open(&bs, filename, BDRV_O_RDWR | BDRV_O_CACHE_WB);
    if (ret < 0) {
        return ret;
    }

    /* File must start empty and grow, check truncate is supported */
    ret = bdrv_truncate(bs, 0);
    if (ret < 0) {
        goto out;
    }

    if (backing_file) {
        header.features |= QED_F_BACKING_FILE;
        header.backing_filename_offset = sizeof(le_header);
        header.backing_filename_size = strlen(backing_file);

        if (qed_fmt_is_raw(backing_fmt)) {
            header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
        }
    }

    qed_header_cpu_to_le(&header, &le_header);
    ret = bdrv_pwrite(bs, 0, &le_header, sizeof(le_header));
    if (ret < 0) {
        goto out;
    }
    ret = bdrv_pwrite(bs, sizeof(le_header), backing_file,
                      header.backing_filename_size);
    if (ret < 0) {
        goto out;
    }

    l1_table = g_malloc0(l1_size);
    ret = bdrv_pwrite(bs, header.l1_table_offset, l1_table, l1_size);
    if (ret < 0) {
        goto out;
    }

    ret = 0; /* success */
out:
    g_free(l1_table);
    bdrv_delete(bs);
    return ret;
}

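/**
 * Parse and validate creation options, then delegate to qed_create()
 */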
static int bdrv_qed_create(const char *filename, QEMUOptionParameter *options)
{
    uint64_t image_size = 0;
    uint32_t cluster_size = QED_DEFAULT_CLUSTER_SIZE;
    uint32_t table_size = QED_DEFAULT_TABLE_SIZE;
    const char *backing_file = NULL;
    const char *backing_fmt = NULL;

    while (options && options->name) {
        if (!strcmp(options->name, BLOCK_OPT_SIZE)) {
            image_size = options->value.n;
        } else if (!strcmp(options->name, BLOCK_OPT_BACKING_FILE)) {
            backing_file = options->value.s;
        } else if (!strcmp(options->name, BLOCK_OPT_BACKING_FMT)) {
            backing_fmt = options->value.s;
        } else if (!strcmp(options->name, BLOCK_OPT_CLUSTER_SIZE)) {
            if (options->value.n) {
                cluster_size = options->value.n;
            }
        } else if (!strcmp(options->name, BLOCK_OPT_TABLE_SIZE)) {
            if (options->value.n) {
                table_size = options->value.n;
            }
        }
        options++;
    }

    if (!qed_is_cluster_size_valid(cluster_size)) {
        fprintf(stderr, "QED cluster size must be within range [%u, %u] and power of 2\n",
                QED_MIN_CLUSTER_SIZE, QED_MAX_CLUSTER_SIZE);
        return -EINVAL;
    }
    if (!qed_is_table_size_valid(table_size)) {
        fprintf(stderr, "QED table size must be within range [%u, %u] and power of 2\n",
                QED_MIN_TABLE_SIZE, QED_MAX_TABLE_SIZE);
        return -EINVAL;
    }
    if (!qed_is_image_size_valid(image_size, cluster_size, table_size)) {
        fprintf(stderr, "QED image size must be a non-zero multiple of "
                        "cluster size and less than %" PRIu64 " bytes\n",
                qed_max_image_size(cluster_size, table_size));
        return -EINVAL;
    }

    return qed_create(filename, cluster_size, image_size, table_size,
                      backing_file, backing_fmt);
}

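/* Completion state for the synchronous bdrv_qed_is_allocated() query */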
typedef struct {
    int is_allocated;
    int *pnum;
} QEDIsAllocatedCB;

static void qed_is_allocated_cb(void *opaque, int ret, uint64_t offset, size_t len)
{
    QEDIsAllocatedCB *cb = opaque;
    *cb->pnum = len / BDRV_SECTOR_SIZE;
    cb->is_allocated = (ret == QED_CLUSTER_FOUND || ret == QED_CLUSTER_ZERO);
}

static int bdrv_qed_is_allocated(BlockDriverState *bs, int64_t sector_num,
                                 int nb_sectors, int *pnum)
{
    BDRVQEDState *s = bs->opaque;
    uint64_t pos = (uint64_t)sector_num * BDRV_SECTOR_SIZE;
    size_t len = (size_t)nb_sectors * BDRV_SECTOR_SIZE;
    QEDIsAllocatedCB cb = {
        .is_allocated = -1,
        .pnum = pnum,
    };
    QEDRequest request = { .l2_table = NULL };

    qed_find_cluster(s, &request, pos, len, qed_is_allocated_cb, &cb);

    while (cb.is_allocated == -1) {
        qemu_aio_wait();
    }

    qed_unref_l2_cache_entry(request.l2_table);

    return cb.is_allocated;
}

static int bdrv_qed_make_empty(BlockDriverState *bs)
{
    return -ENOTSUP;
}

static BDRVQEDState *acb_to_s(QEDAIOCB *acb)
{
    return acb->common.bs->opaque;
}

/**
 * Read from the backing file or zero-fill if no backing file
 *
 * @s:          QED state
 * @pos:        Byte position in device
 * @qiov:       Destination I/O vector
 * @cb:         Completion function
 * @opaque:     User data for completion function
 *
 * This function reads qiov->size bytes starting at pos from the backing file.
 * If there is no backing file then zeroes are read.
 */
static void qed_read_backing_file(BDRVQEDState *s, uint64_t pos,
                                  QEMUIOVector *qiov,
                                  BlockDriverCompletionFunc *cb, void *opaque)
{
    BlockDriverAIOCB *aiocb;
    uint64_t backing_length = 0;
    size_t size;

    /* If there is a backing file, get its length.  Treat the absence of a
     * backing file like a zero length backing file.
     */
    if (s->bs->backing_hd) {
        int64_t l = bdrv_getlength(s->bs->backing_hd);
        if (l < 0) {
            cb(opaque, l);
            return;
        }
        backing_length = l;
    }

    /* Zero all sectors if reading beyond the end of the backing file */
    if (pos >= backing_length ||
        pos + qiov->size > backing_length) {
        qemu_iovec_memset(qiov, 0, qiov->size);
    }

    /* Complete now if there are no backing file sectors to read */
    if (pos >= backing_length) {
        cb(opaque, 0);
        return;
    }

    /* If the read straddles the end of the backing file, shorten it */
    size = MIN((uint64_t)backing_length - pos, qiov->size);

    BLKDBG_EVENT(s->bs->file, BLKDBG_READ_BACKING);
    aiocb = bdrv_aio_readv(s->bs->backing_hd, pos / BDRV_SECTOR_SIZE,
                           qiov, size / BDRV_SECTOR_SIZE, cb, opaque);
    if (!aiocb) {
        cb(opaque, -EIO);
    }
}

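/* Copy-on-write state: a bounce buffer is read from the backing file and
 * then written into the newly allocated data cluster.
 */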
typedef struct {
    GenericCB gencb;
    BDRVQEDState *s;
    QEMUIOVector qiov;
    struct iovec iov;
    uint64_t offset;
} CopyFromBackingFileCB;

static void qed_copy_from_backing_file_cb(void *opaque, int ret)
{
    CopyFromBackingFileCB *copy_cb = opaque;
    qemu_vfree(copy_cb->iov.iov_base);
    gencb_complete(&copy_cb->gencb, ret);
}

static void qed_copy_from_backing_file_write(void *opaque, int ret)
{
    CopyFromBackingFileCB *copy_cb = opaque;
    BDRVQEDState *s = copy_cb->s;
    BlockDriverAIOCB *aiocb;

    if (ret) {
        qed_copy_from_backing_file_cb(copy_cb, ret);
        return;
    }

    BLKDBG_EVENT(s->bs->file, BLKDBG_COW_WRITE);
    aiocb = bdrv_aio_writev(s->bs->file, copy_cb->offset / BDRV_SECTOR_SIZE,
                            &copy_cb->qiov,
                            copy_cb->qiov.size / BDRV_SECTOR_SIZE,
                            qed_copy_from_backing_file_cb, copy_cb);
    if (!aiocb) {
        qed_copy_from_backing_file_cb(copy_cb, -EIO);
    }
}

/**
 * Copy data from backing file into the image
 *
 * @s:          QED state
 * @pos:        Byte position in device
 * @len:        Number of bytes
 * @offset:     Byte offset in image file
 * @cb:         Completion function
 * @opaque:     User data for completion function
 */
static void qed_copy_from_backing_file(BDRVQEDState *s, uint64_t pos,
                                       uint64_t len, uint64_t offset,
                                       BlockDriverCompletionFunc *cb,
                                       void *opaque)
{
    CopyFromBackingFileCB *copy_cb;

    /* Skip copy entirely if there is no work to do */
    if (len == 0) {
        cb(opaque, 0);
        return;
    }

    copy_cb = gencb_alloc(sizeof(*copy_cb), cb, opaque);
    copy_cb->s = s;
    copy_cb->offset = offset;
    copy_cb->iov.iov_base = qemu_blockalign(s->bs, len);
    copy_cb->iov.iov_len = len;
    qemu_iovec_init_external(&copy_cb->qiov, &copy_cb->iov, 1);

    qed_read_backing_file(s, pos, &copy_cb->qiov,
                          qed_copy_from_backing_file_write, copy_cb);
}

/**
 * Link one or more contiguous clusters into a table
 *
 * @s:          QED state
 * @table:      L2 table
 * @index:      First cluster index
 * @n:          Number of contiguous clusters
 * @cluster:    First cluster offset
 *
 * The cluster offset may be an allocated byte offset in the image file, the
 * zero cluster marker, or the unallocated cluster marker.
 */
static void qed_update_l2_table(BDRVQEDState *s, QEDTable *table, int index,
                                unsigned int n, uint64_t cluster)
{
    int i;
    for (i = index; i < index + n; i++) {
        table->offsets[i] = cluster;
        if (!qed_offset_is_unalloc_cluster(cluster) &&
            !qed_offset_is_zero_cluster(cluster)) {
            cluster += s->header.cluster_size;
        }
    }
}

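/**
 * Invoke the user completion callback from a bottom half
 *
 * Deferring to a BH keeps completion callbacks from running re-entrantly.
 */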
static void qed_aio_complete_bh(void *opaque)
{
    QEDAIOCB *acb = opaque;
    BlockDriverCompletionFunc *cb = acb->common.cb;
    void *user_opaque = acb->common.opaque;
    int ret = acb->bh_ret;
    bool *finished = acb->finished;

    qemu_bh_delete(acb->bh);
    qemu_aio_release(acb);

    /* Invoke callback */
    cb(user_opaque, ret);

    /* Signal cancel completion */
    if (finished) {
        *finished = true;
    }
}

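/**
 * Complete a request and wake up the next queued allocating write
 */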
static void qed_aio_complete(QEDAIOCB *acb, int ret)
{
    BDRVQEDState *s = acb_to_s(acb);

    trace_qed_aio_complete(s, acb, ret);

    /* Free resources */
    qemu_iovec_destroy(&acb->cur_qiov);
    qed_unref_l2_cache_entry(acb->request.l2_table);

    /* Arrange for a bh to invoke the completion function */
    acb->bh_ret = ret;
    acb->bh = qemu_bh_new(qed_aio_complete_bh, acb);
    qemu_bh_schedule(acb->bh);

    /* Start next allocating write request waiting behind this one.  Note that
     * requests enqueue themselves when they first hit an unallocated cluster
     * but they wait until the entire request is finished before waking up the
     * next request in the queue.  This ensures that we don't cycle through
     * requests multiple times but rather finish one at a time completely.
     */
    if (acb == QSIMPLEQ_FIRST(&s->allocating_write_reqs)) {
        QSIMPLEQ_REMOVE_HEAD(&s->allocating_write_reqs, next);
        acb = QSIMPLEQ_FIRST(&s->allocating_write_reqs);
        if (acb) {
            qed_aio_next_io(acb, 0);
        } else if (s->header.features & QED_F_NEED_CHECK) {
            qed_start_need_check_timer(s);
        }
    }
}

/**
 * Commit the current L2 table to the cache
 */
static void qed_commit_l2_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    CachedL2Table *l2_table = acb->request.l2_table;
    uint64_t l2_offset = l2_table->offset;

    qed_commit_l2_cache_entry(&s->l2_cache, l2_table);

    /* This is guaranteed to succeed because we just committed the entry to the
     * cache.
     */
    acb->request.l2_table = qed_find_l2_cache_entry(&s->l2_cache, l2_offset);
    assert(acb->request.l2_table != NULL);

    qed_aio_next_io(opaque, ret);
}

/**
 * Update L1 table with new L2 table offset and write it out
 */
static void qed_aio_write_l1_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    int index;

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    index = qed_l1_index(s, acb->cur_pos);
    s->l1_table->offsets[index] = acb->request.l2_table->offset;

    qed_write_l1_table(s, index, 1, qed_commit_l2_update, acb);
}

/**
 * Update L2 table with new cluster offsets and write them out
 */
static void qed_aio_write_l2_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    bool need_alloc = acb->find_cluster_ret == QED_CLUSTER_L1;
    int index;

    if (ret) {
        goto err;
    }

    if (need_alloc) {
        qed_unref_l2_cache_entry(acb->request.l2_table);
        acb->request.l2_table = qed_new_l2_table(s);
    }

    index = qed_l2_index(s, acb->cur_pos);
    qed_update_l2_table(s, acb->request.l2_table->table, index, acb->cur_nclusters,
                        acb->cur_cluster);

    if (need_alloc) {
        /* Write out the whole new L2 table */
        qed_write_l2_table(s, &acb->request, 0, s->table_nelems, true,
                           qed_aio_write_l1_update, acb);
    } else {
        /* Write out only the updated part of the L2 table */
        qed_write_l2_table(s, &acb->request, index, acb->cur_nclusters, false,
                           qed_aio_next_io, acb);
    }
    return;

err:
    qed_aio_complete(acb, ret);
}

/**
 * Flush new data clusters before updating the L2 table
 *
 * This flush is necessary when a backing file is in use.  A crash during an
 * allocating write could result in empty clusters in the image.  If the write
 * only touched a subregion of the cluster, then backing image sectors have
 * been lost in the untouched region.  The solution is to flush after writing a
 * new data cluster and before updating the L2 table.
 */
static void qed_aio_write_flush_before_l2_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);

    if (!bdrv_aio_flush(s->bs->file, qed_aio_write_l2_update, opaque)) {
        qed_aio_complete(acb, -EIO);
    }
}

/**
 * Write data to the image file
 */
static void qed_aio_write_main(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t offset = acb->cur_cluster +
                      qed_offset_into_cluster(s, acb->cur_pos);
    BlockDriverCompletionFunc *next_fn;
    BlockDriverAIOCB *file_acb;

    trace_qed_aio_write_main(s, acb, ret, offset, acb->cur_qiov.size);

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    if (acb->find_cluster_ret == QED_CLUSTER_FOUND) {
        next_fn = qed_aio_next_io;
    } else {
        if (s->bs->backing_hd) {
            next_fn = qed_aio_write_flush_before_l2_update;
        } else {
            next_fn = qed_aio_write_l2_update;
        }
    }

    BLKDBG_EVENT(s->bs->file, BLKDBG_WRITE_AIO);
    file_acb = bdrv_aio_writev(s->bs->file, offset / BDRV_SECTOR_SIZE,
                               &acb->cur_qiov,
                               acb->cur_qiov.size / BDRV_SECTOR_SIZE,
                               next_fn, acb);
    if (!file_acb) {
        qed_aio_complete(acb, -EIO);
    }
}

/**
 * Populate back untouched region of new data cluster
 */
static void qed_aio_write_postfill(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t start = acb->cur_pos + acb->cur_qiov.size;
    uint64_t len =
        qed_start_of_cluster(s, start + s->header.cluster_size - 1) - start;
    uint64_t offset = acb->cur_cluster +
                      qed_offset_into_cluster(s, acb->cur_pos) +
                      acb->cur_qiov.size;

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    trace_qed_aio_write_postfill(s, acb, start, len, offset);
    qed_copy_from_backing_file(s, start, len, offset,
                               qed_aio_write_main, acb);
}

/**
 * Populate front untouched region of new data cluster
 */
static void qed_aio_write_prefill(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t start = qed_start_of_cluster(s, acb->cur_pos);
    uint64_t len = qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_write_prefill(s, acb, start, len, acb->cur_cluster);
    qed_copy_from_backing_file(s, start, len, acb->cur_cluster,
                               qed_aio_write_postfill, acb);
}

/**
 * Check if the QED_F_NEED_CHECK bit should be set during allocating write
 */
static bool qed_should_set_need_check(BDRVQEDState *s)
{
    /* The flush before L2 update path ensures consistency */
    if (s->bs->backing_hd) {
        return false;
    }

    return !(s->header.features & QED_F_NEED_CHECK);
}

/**
 * Write new data cluster
 *
 * @acb:        Write request
 * @len:        Length in bytes
 *
 * This path is taken when writing to previously unallocated clusters.
 */
static void qed_aio_write_alloc(QEDAIOCB *acb, size_t len)
{
    BDRVQEDState *s = acb_to_s(acb);

    /* Cancel timer when the first allocating request comes in */
    if (QSIMPLEQ_EMPTY(&s->allocating_write_reqs)) {
        qed_cancel_need_check_timer(s);
    }

    /* Freeze this request if another allocating write is in progress */
    if (acb != QSIMPLEQ_FIRST(&s->allocating_write_reqs)) {
        QSIMPLEQ_INSERT_TAIL(&s->allocating_write_reqs, acb, next);
    }
    if (acb != QSIMPLEQ_FIRST(&s->allocating_write_reqs) ||
        s->allocating_write_reqs_plugged) {
        return; /* wait for existing request to finish */
    }

    acb->cur_nclusters = qed_bytes_to_clusters(s,
            qed_offset_into_cluster(s, acb->cur_pos) + len);
    acb->cur_cluster = qed_alloc_clusters(s, acb->cur_nclusters);
    qemu_iovec_copy(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    if (qed_should_set_need_check(s)) {
        s->header.features |= QED_F_NEED_CHECK;
        qed_write_header(s, qed_aio_write_prefill, acb);
    } else {
        qed_aio_write_prefill(acb, 0);
    }
}

/**
 * Write data cluster in place
 *
 * @acb:        Write request
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * This path is taken when writing to already allocated clusters.
 */
static void qed_aio_write_inplace(QEDAIOCB *acb, uint64_t offset, size_t len)
{
    /* Calculate the I/O vector */
    acb->cur_cluster = offset;
    qemu_iovec_copy(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Do the actual write */
    qed_aio_write_main(acb, 0);
}

/**
 * Write data cluster
 *
 * @opaque:     Write request
 * @ret:        QED_CLUSTER_FOUND, QED_CLUSTER_L2, QED_CLUSTER_L1,
 *              or -errno
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * Callback from qed_find_cluster().
 */
static void qed_aio_write_data(void *opaque, int ret,
                               uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;

    trace_qed_aio_write_data(acb_to_s(acb), acb, ret, offset, len);

    acb->find_cluster_ret = ret;

    switch (ret) {
    case QED_CLUSTER_FOUND:
        qed_aio_write_inplace(acb, offset, len);
        break;

    case QED_CLUSTER_L2:
    case QED_CLUSTER_L1:
    case QED_CLUSTER_ZERO:
        qed_aio_write_alloc(acb, len);
        break;

    default:
        qed_aio_complete(acb, ret);
        break;
    }
}

/**
 * Read data cluster
 *
 * @opaque:     Read request
 * @ret:        QED_CLUSTER_FOUND, QED_CLUSTER_L2, QED_CLUSTER_L1,
 *              or -errno
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * Callback from qed_find_cluster().
 */
static void qed_aio_read_data(void *opaque, int ret,
                              uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    BlockDriverState *bs = acb->common.bs;
    BlockDriverAIOCB *file_acb;

    /* Adjust offset into cluster */
    offset += qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_read_data(s, acb, ret, offset, len);

    if (ret < 0) {
        goto err;
    }

    qemu_iovec_copy(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Handle zero cluster and backing file reads */
    if (ret == QED_CLUSTER_ZERO) {
        qemu_iovec_memset(&acb->cur_qiov, 0, acb->cur_qiov.size);
        qed_aio_next_io(acb, 0);
        return;
    } else if (ret != QED_CLUSTER_FOUND) {
        qed_read_backing_file(s, acb->cur_pos, &acb->cur_qiov,
                              qed_aio_next_io, acb);
        return;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
    file_acb = bdrv_aio_readv(bs->file, offset / BDRV_SECTOR_SIZE,
                              &acb->cur_qiov,
                              acb->cur_qiov.size / BDRV_SECTOR_SIZE,
                              qed_aio_next_io, acb);
    if (!file_acb) {
        ret = -EIO;
        goto err;
    }
    return;

err:
    qed_aio_complete(acb, ret);
}

/**
 * Begin next I/O or complete the request
 */
static void qed_aio_next_io(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    QEDFindClusterFunc *io_fn =
        acb->is_write ? qed_aio_write_data : qed_aio_read_data;

    trace_qed_aio_next_io(s, acb, ret, acb->cur_pos + acb->cur_qiov.size);

    /* Handle I/O error */
    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    acb->qiov_offset += acb->cur_qiov.size;
    acb->cur_pos += acb->cur_qiov.size;
    qemu_iovec_reset(&acb->cur_qiov);

    /* Complete request */
    if (acb->cur_pos >= acb->end_pos) {
        qed_aio_complete(acb, 0);
        return;
    }

    /* Find next cluster and start I/O */
    qed_find_cluster(s, &acb->request,
                     acb->cur_pos, acb->end_pos - acb->cur_pos,
                     io_fn, acb);
}

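/**
 * Allocate a request and begin the first I/O
 */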
static BlockDriverAIOCB *qed_aio_setup(BlockDriverState *bs,
                                       int64_t sector_num,
                                       QEMUIOVector *qiov, int nb_sectors,
                                       BlockDriverCompletionFunc *cb,
                                       void *opaque, bool is_write)
{
    QEDAIOCB *acb = qemu_aio_get(&qed_aio_pool, bs, cb, opaque);

    trace_qed_aio_setup(bs->opaque, acb, sector_num, nb_sectors,
                        opaque, is_write);

    acb->is_write = is_write;
    acb->finished = NULL;
    acb->qiov = qiov;
    acb->qiov_offset = 0;
    acb->cur_pos = (uint64_t)sector_num * BDRV_SECTOR_SIZE;
    acb->end_pos = acb->cur_pos + nb_sectors * BDRV_SECTOR_SIZE;
    acb->request.l2_table = NULL;
    qemu_iovec_init(&acb->cur_qiov, qiov->niov);

    /* Start request */
    qed_aio_next_io(acb, 0);
    return &acb->common;
}

static BlockDriverAIOCB *bdrv_qed_aio_readv(BlockDriverState *bs,
                                            int64_t sector_num,
                                            QEMUIOVector *qiov, int nb_sectors,
                                            BlockDriverCompletionFunc *cb,
                                            void *opaque)
{
    return qed_aio_setup(bs, sector_num, qiov, nb_sectors, cb, opaque, false);
}

static BlockDriverAIOCB *bdrv_qed_aio_writev(BlockDriverState *bs,
                                             int64_t sector_num,
                                             QEMUIOVector *qiov, int nb_sectors,
                                             BlockDriverCompletionFunc *cb,
                                             void *opaque)
{
    return qed_aio_setup(bs, sector_num, qiov, nb_sectors, cb, opaque, true);
}

static BlockDriverAIOCB *bdrv_qed_aio_flush(BlockDriverState *bs,
                                            BlockDriverCompletionFunc *cb,
                                            void *opaque)
{
    return bdrv_aio_flush(bs->file, cb, opaque);
}

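/**
 * Grow the image (shrinking is not supported); only the logical image size
 * in the header changes
 */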
static int bdrv_qed_truncate(BlockDriverState *bs, int64_t offset)
{
    BDRVQEDState *s = bs->opaque;
    uint64_t old_image_size;
    int ret;

    if (!qed_is_image_size_valid(offset, s->header.cluster_size,
                                 s->header.table_size)) {
        return -EINVAL;
    }

    /* Shrinking is currently not supported */
    if ((uint64_t)offset < s->header.image_size) {
        return -ENOTSUP;
    }

    old_image_size = s->header.image_size;
    s->header.image_size = offset;
    ret = qed_write_header_sync(s);
    if (ret < 0) {
        s->header.image_size = old_image_size;
    }
    return ret;
}

static int64_t bdrv_qed_getlength(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;
    return s->header.image_size;
}

static int bdrv_qed_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
{
    BDRVQEDState *s = bs->opaque;

    memset(bdi, 0, sizeof(*bdi));
    bdi->cluster_size = s->header.cluster_size;
    return 0;
}

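/**
 * Rewrite the header with a new backing filename and format
 */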
static int bdrv_qed_change_backing_file(BlockDriverState *bs,
                                        const char *backing_file,
                                        const char *backing_fmt)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader new_header, le_header;
    void *buffer;
    size_t buffer_len, backing_file_len;
    int ret;

    /* Refuse to set backing filename if unknown compat feature bits are
     * active.  If the image uses an unknown compat feature then we may not
     * know the layout of data following the header structure and cannot safely
     * add a new string.
     */
    if (backing_file && (s->header.compat_features &
                         ~QED_COMPAT_FEATURE_MASK)) {
        return -ENOTSUP;
    }

    memcpy(&new_header, &s->header, sizeof(new_header));

    new_header.features &= ~(QED_F_BACKING_FILE |
                             QED_F_BACKING_FORMAT_NO_PROBE);

    /* Adjust feature flags */
    if (backing_file) {
        new_header.features |= QED_F_BACKING_FILE;

        if (qed_fmt_is_raw(backing_fmt)) {
            new_header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
        }
    }

    /* Calculate new header size */
    backing_file_len = 0;

    if (backing_file) {
        backing_file_len = strlen(backing_file);
    }

    buffer_len = sizeof(new_header);
    new_header.backing_filename_offset = buffer_len;
    new_header.backing_filename_size = backing_file_len;
    buffer_len += backing_file_len;

    /* Make sure we can rewrite header without failing */
    if (buffer_len > new_header.header_size * new_header.cluster_size) {
        return -ENOSPC;
    }

    /* Prepare new header */
    buffer = g_malloc(buffer_len);

    qed_header_cpu_to_le(&new_header, &le_header);
    memcpy(buffer, &le_header, sizeof(le_header));
    buffer_len = sizeof(le_header);

    memcpy(buffer + buffer_len, backing_file, backing_file_len);
    buffer_len += backing_file_len;

    /* Write new header */
    ret = bdrv_pwrite_sync(bs->file, 0, buffer, buffer_len);
    g_free(buffer);
    if (ret == 0) {
        memcpy(&s->header, &new_header, sizeof(new_header));
    }
    return ret;
}

static int bdrv_qed_check(BlockDriverState *bs, BdrvCheckResult *result)
{
    BDRVQEDState *s = bs->opaque;

    return qed_check(s, result, false);
}

static QEMUOptionParameter qed_create_options[] = {
    {
        .name = BLOCK_OPT_SIZE,
        .type = OPT_SIZE,
        .help = "Virtual disk size (in bytes)"
    }, {
        .name = BLOCK_OPT_BACKING_FILE,
        .type = OPT_STRING,
        .help = "File name of a base image"
    }, {
        .name = BLOCK_OPT_BACKING_FMT,
        .type = OPT_STRING,
        .help = "Image format of the base image"
    }, {
        .name = BLOCK_OPT_CLUSTER_SIZE,
        .type = OPT_SIZE,
        .help = "Cluster size (in bytes)",
        .value = { .n = QED_DEFAULT_CLUSTER_SIZE },
    }, {
        .name = BLOCK_OPT_TABLE_SIZE,
        .type = OPT_SIZE,
        .help = "L1/L2 table size (in clusters)"
    },
    { /* end of list */ }
};

static BlockDriver bdrv_qed = {
    .format_name              = "qed",
    .instance_size            = sizeof(BDRVQEDState),
    .create_options           = qed_create_options,

    .bdrv_probe               = bdrv_qed_probe,
    .bdrv_open                = bdrv_qed_open,
    .bdrv_close               = bdrv_qed_close,
    .bdrv_create              = bdrv_qed_create,
    .bdrv_is_allocated        = bdrv_qed_is_allocated,
    .bdrv_make_empty          = bdrv_qed_make_empty,
    .bdrv_aio_readv           = bdrv_qed_aio_readv,
    .bdrv_aio_writev          = bdrv_qed_aio_writev,
    .bdrv_aio_flush           = bdrv_qed_aio_flush,
    .bdrv_truncate            = bdrv_qed_truncate,
    .bdrv_getlength           = bdrv_qed_getlength,
    .bdrv_get_info            = bdrv_qed_get_info,
    .bdrv_change_backing_file = bdrv_qed_change_backing_file,
    .bdrv_check               = bdrv_qed_check,
};

static void bdrv_qed_init(void)
{
    bdrv_register(&bdrv_qed);
}

block_init(bdrv_qed_init);