block/qed.c @ d5124c00
/*
 * QEMU Enhanced Disk Format
 *
 * Copyright IBM, Corp. 2010
 *
 * Authors:
 *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/timer.h"
#include "trace.h"
#include "qed.h"
#include "qapi/qmp/qerror.h"
#include "migration/migration.h"

static void qed_aio_cancel(BlockDriverAIOCB *blockacb)
{
    QEDAIOCB *acb = (QEDAIOCB *)blockacb;
    bool finished = false;

    /* Wait for the request to finish */
    acb->finished = &finished;
    while (!finished) {
        qemu_aio_wait();
    }
}

static const AIOCBInfo qed_aiocb_info = {
    .aiocb_size         = sizeof(QEDAIOCB),
    .cancel             = qed_aio_cancel,
};

static int bdrv_qed_probe(const uint8_t *buf, int buf_size,
                          const char *filename)
{
    const QEDHeader *header = (const QEDHeader *)buf;

    if (buf_size < sizeof(*header)) {
        return 0;
    }
    if (le32_to_cpu(header->magic) != QED_MAGIC) {
        return 0;
    }
    return 100;
}

/**
 * Check whether an image format is raw
 *
 * @fmt:    Backing file format, may be NULL
 */
static bool qed_fmt_is_raw(const char *fmt)
{
    return fmt && strcmp(fmt, "raw") == 0;
}

static void qed_header_le_to_cpu(const QEDHeader *le, QEDHeader *cpu)
{
    cpu->magic = le32_to_cpu(le->magic);
    cpu->cluster_size = le32_to_cpu(le->cluster_size);
    cpu->table_size = le32_to_cpu(le->table_size);
    cpu->header_size = le32_to_cpu(le->header_size);
    cpu->features = le64_to_cpu(le->features);
    cpu->compat_features = le64_to_cpu(le->compat_features);
    cpu->autoclear_features = le64_to_cpu(le->autoclear_features);
    cpu->l1_table_offset = le64_to_cpu(le->l1_table_offset);
    cpu->image_size = le64_to_cpu(le->image_size);
    cpu->backing_filename_offset = le32_to_cpu(le->backing_filename_offset);
    cpu->backing_filename_size = le32_to_cpu(le->backing_filename_size);
}

static void qed_header_cpu_to_le(const QEDHeader *cpu, QEDHeader *le)
{
    le->magic = cpu_to_le32(cpu->magic);
    le->cluster_size = cpu_to_le32(cpu->cluster_size);
    le->table_size = cpu_to_le32(cpu->table_size);
    le->header_size = cpu_to_le32(cpu->header_size);
    le->features = cpu_to_le64(cpu->features);
    le->compat_features = cpu_to_le64(cpu->compat_features);
    le->autoclear_features = cpu_to_le64(cpu->autoclear_features);
    le->l1_table_offset = cpu_to_le64(cpu->l1_table_offset);
    le->image_size = cpu_to_le64(cpu->image_size);
    le->backing_filename_offset = cpu_to_le32(cpu->backing_filename_offset);
    le->backing_filename_size = cpu_to_le32(cpu->backing_filename_size);
}

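/* All QEDHeader fields are stored little-endian on disk; the two helpers
 * above convert whole headers between the on-disk and host-CPU byte order,
 * so the rest of this file only ever works with native-endian values.
 */
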
int qed_write_header_sync(BDRVQEDState *s)
{
    QEDHeader le;
    int ret;

    qed_header_cpu_to_le(&s->header, &le);
    ret = bdrv_pwrite(s->bs->file, 0, &le, sizeof(le));
    if (ret != sizeof(le)) {
        return ret;
    }
    return 0;
}

typedef struct {
    GenericCB gencb;
    BDRVQEDState *s;
    struct iovec iov;
    QEMUIOVector qiov;
    int nsectors;
    uint8_t *buf;
} QEDWriteHeaderCB;

static void qed_write_header_cb(void *opaque, int ret)
{
    QEDWriteHeaderCB *write_header_cb = opaque;

    qemu_vfree(write_header_cb->buf);
    gencb_complete(write_header_cb, ret);
}

static void qed_write_header_read_cb(void *opaque, int ret)
{
    QEDWriteHeaderCB *write_header_cb = opaque;
    BDRVQEDState *s = write_header_cb->s;

    if (ret) {
        qed_write_header_cb(write_header_cb, ret);
        return;
    }

    /* Update header */
    qed_header_cpu_to_le(&s->header, (QEDHeader *)write_header_cb->buf);

    bdrv_aio_writev(s->bs->file, 0, &write_header_cb->qiov,
                    write_header_cb->nsectors, qed_write_header_cb,
                    write_header_cb);
}

/**
 * Update header in-place (does not rewrite backing filename or other strings)
 *
 * This function only updates known header fields in-place and does not affect
 * extra data after the QED header.
 */
static void qed_write_header(BDRVQEDState *s, BlockDriverCompletionFunc cb,
                             void *opaque)
{
    /* We must write full sectors for O_DIRECT but cannot necessarily generate
     * the data following the header if an unrecognized compat feature is
     * active.  Therefore, first read the sectors containing the header, update
     * them, and write back.
     */

    int nsectors = (sizeof(QEDHeader) + BDRV_SECTOR_SIZE - 1) /
                   BDRV_SECTOR_SIZE;
    size_t len = nsectors * BDRV_SECTOR_SIZE;
    QEDWriteHeaderCB *write_header_cb = gencb_alloc(sizeof(*write_header_cb),
                                                    cb, opaque);

    write_header_cb->s = s;
    write_header_cb->nsectors = nsectors;
    write_header_cb->buf = qemu_blockalign(s->bs, len);
    write_header_cb->iov.iov_base = write_header_cb->buf;
    write_header_cb->iov.iov_len = len;
    qemu_iovec_init_external(&write_header_cb->qiov, &write_header_cb->iov, 1);

    bdrv_aio_readv(s->bs->file, 0, &write_header_cb->qiov, nsectors,
                   qed_write_header_read_cb, write_header_cb);
}

static uint64_t qed_max_image_size(uint32_t cluster_size, uint32_t table_size)
{
    uint64_t table_entries;
    uint64_t l2_size;

    table_entries = (table_size * cluster_size) / sizeof(uint64_t);
    l2_size = table_entries * cluster_size;

    return l2_size * table_entries;
}

static bool qed_is_cluster_size_valid(uint32_t cluster_size)
{
    if (cluster_size < QED_MIN_CLUSTER_SIZE ||
        cluster_size > QED_MAX_CLUSTER_SIZE) {
        return false;
    }
    if (cluster_size & (cluster_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}

static bool qed_is_table_size_valid(uint32_t table_size)
{
    if (table_size < QED_MIN_TABLE_SIZE ||
        table_size > QED_MAX_TABLE_SIZE) {
        return false;
    }
    if (table_size & (table_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}

static bool qed_is_image_size_valid(uint64_t image_size, uint32_t cluster_size,
                                    uint32_t table_size)
{
    if (image_size % BDRV_SECTOR_SIZE != 0) {
        return false; /* not multiple of sector size */
    }
    if (image_size > qed_max_image_size(cluster_size, table_size)) {
        return false; /* image is too large */
    }
    return true;
}

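/* Worked example for qed_max_image_size(), assuming the defaults used by
 * bdrv_qed_create() below (64 KiB clusters, table_size of 4 clusters):
 *
 *   table_entries = (4 * 65536) / 8       = 32768 entries per table
 *   l2_size       = 32768 * 65536         = 2 GiB mapped per L2 table
 *   max image     = 2 GiB * 32768 entries = 64 TiB
 */
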
/**
 * Read a string of known length from the image file
 *
 * @file:       Image file
 * @offset:     File offset to start of string, in bytes
 * @n:          String length in bytes
 * @buf:        Destination buffer
 * @buflen:     Destination buffer length in bytes
 * @ret:        0 on success, -errno on failure
 *
 * The string is NUL-terminated.
 */
static int qed_read_string(BlockDriverState *file, uint64_t offset, size_t n,
                           char *buf, size_t buflen)
{
    int ret;
    if (n >= buflen) {
        return -EINVAL;
    }
    ret = bdrv_pread(file, offset, buf, n);
    if (ret < 0) {
        return ret;
    }
    buf[n] = '\0';
    return 0;
}

/**
 * Allocate new clusters
 *
 * @s:          QED state
 * @n:          Number of contiguous clusters to allocate
 * @ret:        Offset of first allocated cluster
 *
 * This function only produces the offset where the new clusters should be
 * written.  It updates BDRVQEDState but does not make any changes to the image
 * file.
 */
static uint64_t qed_alloc_clusters(BDRVQEDState *s, unsigned int n)
{
    uint64_t offset = s->file_size;
    s->file_size += n * s->header.cluster_size;
    return offset;
}

QEDTable *qed_alloc_table(BDRVQEDState *s)
{
    /* Honor O_DIRECT memory alignment requirements */
    return qemu_blockalign(s->bs,
                           s->header.cluster_size * s->header.table_size);
}

/**
 * Allocate a new zeroed L2 table
 */
static CachedL2Table *qed_new_l2_table(BDRVQEDState *s)
{
    CachedL2Table *l2_table = qed_alloc_l2_cache_entry(&s->l2_cache);

    l2_table->table = qed_alloc_table(s);
    l2_table->offset = qed_alloc_clusters(s, s->header.table_size);

    memset(l2_table->table->offsets, 0,
           s->header.cluster_size * s->header.table_size);
    return l2_table;
}

static void qed_aio_next_io(void *opaque, int ret);

static void qed_plug_allocating_write_reqs(BDRVQEDState *s)
{
    assert(!s->allocating_write_reqs_plugged);

    s->allocating_write_reqs_plugged = true;
}

static void qed_unplug_allocating_write_reqs(BDRVQEDState *s)
{
    QEDAIOCB *acb;

    assert(s->allocating_write_reqs_plugged);

    s->allocating_write_reqs_plugged = false;

    acb = QSIMPLEQ_FIRST(&s->allocating_write_reqs);
    if (acb) {
        qed_aio_next_io(acb, 0);
    }
}

static void qed_finish_clear_need_check(void *opaque, int ret)
{
    /* Do nothing */
}

static void qed_flush_after_clear_need_check(void *opaque, int ret)
{
    BDRVQEDState *s = opaque;

    bdrv_aio_flush(s->bs, qed_finish_clear_need_check, s);

    /* No need to wait until flush completes */
    qed_unplug_allocating_write_reqs(s);
}

static void qed_clear_need_check(void *opaque, int ret)
{
    BDRVQEDState *s = opaque;

    if (ret) {
        qed_unplug_allocating_write_reqs(s);
        return;
    }

    s->header.features &= ~QED_F_NEED_CHECK;
    qed_write_header(s, qed_flush_after_clear_need_check, s);
}

static void qed_need_check_timer_cb(void *opaque)
{
    BDRVQEDState *s = opaque;

    /* The timer should only fire when allocating writes have drained */
    assert(!QSIMPLEQ_FIRST(&s->allocating_write_reqs));

    trace_qed_need_check_timer_cb(s);

    qed_plug_allocating_write_reqs(s);

    /* Ensure writes are on disk before clearing flag */
    bdrv_aio_flush(s->bs, qed_clear_need_check, s);
}

static void qed_start_need_check_timer(BDRVQEDState *s)
{
    trace_qed_start_need_check_timer(s);

    /* Use QEMU_CLOCK_VIRTUAL so we don't alter the image file while suspended
     * for migration.
     */
    timer_mod(s->need_check_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              get_ticks_per_sec() * QED_NEED_CHECK_TIMEOUT);
}

/* It's okay to call this multiple times or when no timer is started */
static void qed_cancel_need_check_timer(BDRVQEDState *s)
{
    trace_qed_cancel_need_check_timer(s);
    timer_del(s->need_check_timer);
}

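/* Summary of the dirty-bit protocol implemented by the functions above: the
 * first allocating write sets QED_F_NEED_CHECK in the header, and this timer
 * clears it again once the image has been quiescent for
 * QED_NEED_CHECK_TIMEOUT seconds.  The clearing sequence is: plug allocating
 * writes, flush, rewrite the header, then start another flush and unplug
 * without waiting for it.  A crash while the bit is set triggers a
 * consistency check on the next writable open (see bdrv_qed_open below).
 */
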
static void bdrv_qed_rebind(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;
    s->bs = bs;
}

static int bdrv_qed_open(BlockDriverState *bs, QDict *options, int flags,
                         Error **errp)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader le_header;
    int64_t file_size;
    int ret;

    s->bs = bs;
    QSIMPLEQ_INIT(&s->allocating_write_reqs);

    ret = bdrv_pread(bs->file, 0, &le_header, sizeof(le_header));
    if (ret < 0) {
        return ret;
    }
    qed_header_le_to_cpu(&le_header, &s->header);

    if (s->header.magic != QED_MAGIC) {
        return -EMEDIUMTYPE;
    }
    if (s->header.features & ~QED_FEATURE_MASK) {
        /* image uses unsupported feature bits */
        char buf[64];
        snprintf(buf, sizeof(buf), "%" PRIx64,
                 s->header.features & ~QED_FEATURE_MASK);
        qerror_report(QERR_UNKNOWN_BLOCK_FORMAT_FEATURE,
                      bs->device_name, "QED", buf);
        return -ENOTSUP;
    }
    if (!qed_is_cluster_size_valid(s->header.cluster_size)) {
        return -EINVAL;
    }

    /* Round down file size to the last cluster */
    file_size = bdrv_getlength(bs->file);
    if (file_size < 0) {
        return file_size;
    }
    s->file_size = qed_start_of_cluster(s, file_size);

    if (!qed_is_table_size_valid(s->header.table_size)) {
        return -EINVAL;
    }
    if (!qed_is_image_size_valid(s->header.image_size,
                                 s->header.cluster_size,
                                 s->header.table_size)) {
        return -EINVAL;
    }
    if (!qed_check_table_offset(s, s->header.l1_table_offset)) {
        return -EINVAL;
    }

    s->table_nelems = (s->header.cluster_size * s->header.table_size) /
                      sizeof(uint64_t);
    s->l2_shift = ffs(s->header.cluster_size) - 1;
    s->l2_mask = s->table_nelems - 1;
    s->l1_shift = s->l2_shift + ffs(s->table_nelems) - 1;

    if ((s->header.features & QED_F_BACKING_FILE)) {
        if ((uint64_t)s->header.backing_filename_offset +
            s->header.backing_filename_size >
            s->header.cluster_size * s->header.header_size) {
            return -EINVAL;
        }

        ret = qed_read_string(bs->file, s->header.backing_filename_offset,
                              s->header.backing_filename_size, bs->backing_file,
                              sizeof(bs->backing_file));
        if (ret < 0) {
            return ret;
        }

        if (s->header.features & QED_F_BACKING_FORMAT_NO_PROBE) {
            pstrcpy(bs->backing_format, sizeof(bs->backing_format), "raw");
        }
    }

    /* Reset unknown autoclear feature bits.  This is a backwards
     * compatibility mechanism that allows images to be opened by older
     * programs, which "knock out" unknown feature bits.  When an image is
     * opened by a newer program again it can detect that the autoclear
     * feature is no longer valid.
     */
    if ((s->header.autoclear_features & ~QED_AUTOCLEAR_FEATURE_MASK) != 0 &&
        !bdrv_is_read_only(bs->file) && !(flags & BDRV_O_INCOMING)) {
        s->header.autoclear_features &= QED_AUTOCLEAR_FEATURE_MASK;

        ret = qed_write_header_sync(s);
        if (ret) {
            return ret;
        }

        /* From here on only known autoclear feature bits are valid */
        bdrv_flush(bs->file);
    }

    s->l1_table = qed_alloc_table(s);
    qed_init_l2_cache(&s->l2_cache);

    ret = qed_read_l1_table_sync(s);
    if (ret) {
        goto out;
    }

    /* If image was not closed cleanly, check consistency */
    if (!(flags & BDRV_O_CHECK) && (s->header.features & QED_F_NEED_CHECK)) {
        /* Read-only images cannot be fixed.  There is no risk of corruption
         * since write operations are not possible.  Therefore, allow
         * potentially inconsistent images to be opened read-only.  This can
         * aid data recovery from an otherwise inconsistent image.
         */
        if (!bdrv_is_read_only(bs->file) &&
            !(flags & BDRV_O_INCOMING)) {
            BdrvCheckResult result = {0};

            ret = qed_check(s, &result, true);
            if (ret) {
                goto out;
            }
        }
    }

    s->need_check_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                       qed_need_check_timer_cb, s);

out:
    if (ret) {
        qed_free_l2_cache(&s->l2_cache);
        qemu_vfree(s->l1_table);
    }
    return ret;
}

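/* Index math set up by bdrv_qed_open() above, worked through for the default
 * geometry (64 KiB clusters, table_size = 4, so table_nelems = 32768):
 *
 *   l2_shift = ffs(65536) - 1 = 16        (byte offset -> cluster)
 *   l1_shift = 16 + ffs(32768) - 1 = 31   (each L1 entry covers 2 GiB)
 *
 * A guest byte position then decomposes as:
 *   L1 index = pos >> 31
 *   L2 index = (pos >> 16) & l2_mask      (l2_mask = 32767)
 */
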
/* We have nothing to do for QED reopen, stubs just return
 * success */
static int bdrv_qed_reopen_prepare(BDRVReopenState *state,
                                   BlockReopenQueue *queue, Error **errp)
{
    return 0;
}

static void bdrv_qed_close(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    qed_cancel_need_check_timer(s);
    timer_free(s->need_check_timer);

    /* Ensure writes reach stable storage */
    bdrv_flush(bs->file);

    /* Clean shutdown, no check required on next open */
    if (s->header.features & QED_F_NEED_CHECK) {
        s->header.features &= ~QED_F_NEED_CHECK;
        qed_write_header_sync(s);
    }

    qed_free_l2_cache(&s->l2_cache);
    qemu_vfree(s->l1_table);
}

static int qed_create(const char *filename, uint32_t cluster_size,
                      uint64_t image_size, uint32_t table_size,
                      const char *backing_file, const char *backing_fmt)
{
    QEDHeader header = {
        .magic = QED_MAGIC,
        .cluster_size = cluster_size,
        .table_size = table_size,
        .header_size = 1,
        .features = 0,
        .compat_features = 0,
        .l1_table_offset = cluster_size,
        .image_size = image_size,
    };
    QEDHeader le_header;
    uint8_t *l1_table = NULL;
    size_t l1_size = header.cluster_size * header.table_size;
    int ret = 0;
    BlockDriverState *bs = NULL;

    ret = bdrv_create_file(filename, NULL);
    if (ret < 0) {
        return ret;
    }

    ret = bdrv_file_open(&bs, filename, NULL, BDRV_O_RDWR | BDRV_O_CACHE_WB);
    if (ret < 0) {
        return ret;
    }

    /* File must start empty and grow, check truncate is supported */
    ret = bdrv_truncate(bs, 0);
    if (ret < 0) {
        goto out;
    }

    if (backing_file) {
        header.features |= QED_F_BACKING_FILE;
        header.backing_filename_offset = sizeof(le_header);
        header.backing_filename_size = strlen(backing_file);

        if (qed_fmt_is_raw(backing_fmt)) {
            header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
        }
    }

    qed_header_cpu_to_le(&header, &le_header);
    ret = bdrv_pwrite(bs, 0, &le_header, sizeof(le_header));
    if (ret < 0) {
        goto out;
    }
    ret = bdrv_pwrite(bs, sizeof(le_header), backing_file,
                      header.backing_filename_size);
    if (ret < 0) {
        goto out;
    }

    l1_table = g_malloc0(l1_size);
    ret = bdrv_pwrite(bs, header.l1_table_offset, l1_table, l1_size);
    if (ret < 0) {
        goto out;
    }

    ret = 0; /* success */
out:
    g_free(l1_table);
    bdrv_unref(bs);
    return ret;
}

static int bdrv_qed_create(const char *filename, QEMUOptionParameter *options,
                           Error **errp)
{
    uint64_t image_size = 0;
    uint32_t cluster_size = QED_DEFAULT_CLUSTER_SIZE;
    uint32_t table_size = QED_DEFAULT_TABLE_SIZE;
    const char *backing_file = NULL;
    const char *backing_fmt = NULL;

    while (options && options->name) {
        if (!strcmp(options->name, BLOCK_OPT_SIZE)) {
            image_size = options->value.n;
        } else if (!strcmp(options->name, BLOCK_OPT_BACKING_FILE)) {
            backing_file = options->value.s;
        } else if (!strcmp(options->name, BLOCK_OPT_BACKING_FMT)) {
            backing_fmt = options->value.s;
        } else if (!strcmp(options->name, BLOCK_OPT_CLUSTER_SIZE)) {
            if (options->value.n) {
                cluster_size = options->value.n;
            }
        } else if (!strcmp(options->name, BLOCK_OPT_TABLE_SIZE)) {
            if (options->value.n) {
                table_size = options->value.n;
            }
        }
        options++;
    }

    if (!qed_is_cluster_size_valid(cluster_size)) {
        fprintf(stderr, "QED cluster size must be within range [%u, %u] and power of 2\n",
                QED_MIN_CLUSTER_SIZE, QED_MAX_CLUSTER_SIZE);
        return -EINVAL;
    }
    if (!qed_is_table_size_valid(table_size)) {
        fprintf(stderr, "QED table size must be within range [%u, %u] and power of 2\n",
                QED_MIN_TABLE_SIZE, QED_MAX_TABLE_SIZE);
        return -EINVAL;
    }
    if (!qed_is_image_size_valid(image_size, cluster_size, table_size)) {
        fprintf(stderr, "QED image size must be a non-zero multiple of "
                        "cluster size and less than %" PRIu64 " bytes\n",
                qed_max_image_size(cluster_size, table_size));
        return -EINVAL;
    }

    return qed_create(filename, cluster_size, image_size, table_size,
                      backing_file, backing_fmt);
}

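/* These create options surface through qemu-img; the option names come from
 * the BLOCK_OPT_* keys parsed above, and the file names and sizes in these
 * illustrative invocations are arbitrary:
 *
 *   qemu-img create -f qed -o cluster_size=65536,table_size=4 disk.qed 10G
 *   qemu-img create -f qed -o backing_file=base.raw,backing_fmt=raw disk.qed
 */
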
typedef struct {
    BlockDriverState *bs;
    Coroutine *co;
    uint64_t pos;
    int64_t status;
    int *pnum;
} QEDIsAllocatedCB;

static void qed_is_allocated_cb(void *opaque, int ret, uint64_t offset, size_t len)
{
    QEDIsAllocatedCB *cb = opaque;
    BDRVQEDState *s = cb->bs->opaque;
    *cb->pnum = len / BDRV_SECTOR_SIZE;
    switch (ret) {
    case QED_CLUSTER_FOUND:
        offset |= qed_offset_into_cluster(s, cb->pos);
        cb->status = BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID | offset;
        break;
    case QED_CLUSTER_ZERO:
        cb->status = BDRV_BLOCK_ZERO;
        break;
    case QED_CLUSTER_L2:
    case QED_CLUSTER_L1:
        cb->status = 0;
        break;
    default:
        assert(ret < 0);
        cb->status = ret;
        break;
    }

    if (cb->co) {
        qemu_coroutine_enter(cb->co, NULL);
    }
}

static int64_t coroutine_fn bdrv_qed_co_get_block_status(BlockDriverState *bs,
                                                         int64_t sector_num,
                                                         int nb_sectors, int *pnum)
{
    BDRVQEDState *s = bs->opaque;
    size_t len = (size_t)nb_sectors * BDRV_SECTOR_SIZE;
    QEDIsAllocatedCB cb = {
        .bs = bs,
        .pos = (uint64_t)sector_num * BDRV_SECTOR_SIZE,
        .status = BDRV_BLOCK_OFFSET_MASK,
        .pnum = pnum,
    };
    QEDRequest request = { .l2_table = NULL };

    qed_find_cluster(s, &request, cb.pos, len, qed_is_allocated_cb, &cb);

    /* Now sleep if the callback wasn't invoked immediately */
    while (cb.status == BDRV_BLOCK_OFFSET_MASK) {
        cb.co = qemu_coroutine_self();
        qemu_coroutine_yield();
    }

    qed_unref_l2_cache_entry(request.l2_table);

    return cb.status;
}

static int bdrv_qed_make_empty(BlockDriverState *bs)
{
    return -ENOTSUP;
}

static BDRVQEDState *acb_to_s(QEDAIOCB *acb)
{
    return acb->common.bs->opaque;
}

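/* Note the pattern used above (and again by bdrv_qed_co_write_zeroes below):
 * a callback-based helper is driven from coroutine context by initializing
 * the status to a sentinel (BDRV_BLOCK_OFFSET_MASK, which no real result
 * equals) so the caller can detect whether the callback already ran
 * synchronously; only if it did not does the coroutine record itself in
 * cb.co and yield until the callback re-enters it.
 */
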
/**
 * Read from the backing file or zero-fill if no backing file
 *
 * @s:          QED state
 * @pos:        Byte position in device
 * @qiov:       Destination I/O vector
 * @cb:         Completion function
 * @opaque:     User data for completion function
 *
 * This function reads qiov->size bytes starting at pos from the backing file.
 * If there is no backing file then zeroes are read.
 */
static void qed_read_backing_file(BDRVQEDState *s, uint64_t pos,
                                  QEMUIOVector *qiov,
                                  BlockDriverCompletionFunc *cb, void *opaque)
{
    uint64_t backing_length = 0;
    size_t size;

    /* If there is a backing file, get its length.  Treat the absence of a
     * backing file like a zero length backing file.
     */
    if (s->bs->backing_hd) {
        int64_t l = bdrv_getlength(s->bs->backing_hd);
        if (l < 0) {
            cb(opaque, l);
            return;
        }
        backing_length = l;
    }

    /* Zero all sectors if reading beyond the end of the backing file */
    if (pos >= backing_length ||
        pos + qiov->size > backing_length) {
        qemu_iovec_memset(qiov, 0, 0, qiov->size);
    }

    /* Complete now if there are no backing file sectors to read */
    if (pos >= backing_length) {
        cb(opaque, 0);
        return;
    }

    /* If the read straddles the end of the backing file, shorten it */
    size = MIN((uint64_t)backing_length - pos, qiov->size);

    BLKDBG_EVENT(s->bs->file, BLKDBG_READ_BACKING_AIO);
    bdrv_aio_readv(s->bs->backing_hd, pos / BDRV_SECTOR_SIZE,
                   qiov, size / BDRV_SECTOR_SIZE, cb, opaque);
}

typedef struct {
    GenericCB gencb;
    BDRVQEDState *s;
    QEMUIOVector qiov;
    struct iovec iov;
    uint64_t offset;
} CopyFromBackingFileCB;

static void qed_copy_from_backing_file_cb(void *opaque, int ret)
{
    CopyFromBackingFileCB *copy_cb = opaque;
    qemu_vfree(copy_cb->iov.iov_base);
    gencb_complete(&copy_cb->gencb, ret);
}

static void qed_copy_from_backing_file_write(void *opaque, int ret)
{
    CopyFromBackingFileCB *copy_cb = opaque;
    BDRVQEDState *s = copy_cb->s;

    if (ret) {
        qed_copy_from_backing_file_cb(copy_cb, ret);
        return;
    }

    BLKDBG_EVENT(s->bs->file, BLKDBG_COW_WRITE);
    bdrv_aio_writev(s->bs->file, copy_cb->offset / BDRV_SECTOR_SIZE,
                    &copy_cb->qiov, copy_cb->qiov.size / BDRV_SECTOR_SIZE,
                    qed_copy_from_backing_file_cb, copy_cb);
}

/**
 * Copy data from backing file into the image
 *
 * @s:          QED state
 * @pos:        Byte position in device
 * @len:        Number of bytes
 * @offset:     Byte offset in image file
 * @cb:         Completion function
 * @opaque:     User data for completion function
 */
static void qed_copy_from_backing_file(BDRVQEDState *s, uint64_t pos,
                                       uint64_t len, uint64_t offset,
                                       BlockDriverCompletionFunc *cb,
                                       void *opaque)
{
    CopyFromBackingFileCB *copy_cb;

    /* Skip copy entirely if there is no work to do */
    if (len == 0) {
        cb(opaque, 0);
        return;
    }

    copy_cb = gencb_alloc(sizeof(*copy_cb), cb, opaque);
    copy_cb->s = s;
    copy_cb->offset = offset;
    copy_cb->iov.iov_base = qemu_blockalign(s->bs, len);
    copy_cb->iov.iov_len = len;
    qemu_iovec_init_external(&copy_cb->qiov, &copy_cb->iov, 1);

    qed_read_backing_file(s, pos, &copy_cb->qiov,
                          qed_copy_from_backing_file_write, copy_cb);
}

|
845 |
/**
|
846 |
* Link one or more contiguous clusters into a table
|
847 |
*
|
848 |
* @s: QED state
|
849 |
* @table: L2 table
|
850 |
* @index: First cluster index
|
851 |
* @n: Number of contiguous clusters
|
852 |
* @cluster: First cluster offset
|
853 |
*
|
854 |
* The cluster offset may be an allocated byte offset in the image file, the
|
855 |
* zero cluster marker, or the unallocated cluster marker.
|
856 |
*/
|
857 |
static void qed_update_l2_table(BDRVQEDState *s, QEDTable *table, int index, |
858 |
unsigned int n, uint64_t cluster) |
859 |
{ |
860 |
int i;
|
861 |
for (i = index; i < index + n; i++) {
|
862 |
table->offsets[i] = cluster; |
863 |
if (!qed_offset_is_unalloc_cluster(cluster) &&
|
864 |
!qed_offset_is_zero_cluster(cluster)) { |
865 |
cluster += s->header.cluster_size; |
866 |
} |
867 |
} |
868 |
} |
869 |
|
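/* For example, with 64 KiB clusters, qed_update_l2_table(s, table, 5, 3,
 * 0x80000) produces offsets[5] = 0x80000, offsets[6] = 0x90000 and
 * offsets[7] = 0xa0000, while the zero/unallocated markers are written
 * unchanged into every entry instead of being incremented.
 */
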
static void qed_aio_complete_bh(void *opaque)
{
    QEDAIOCB *acb = opaque;
    BlockDriverCompletionFunc *cb = acb->common.cb;
    void *user_opaque = acb->common.opaque;
    int ret = acb->bh_ret;
    bool *finished = acb->finished;

    qemu_bh_delete(acb->bh);
    qemu_aio_release(acb);

    /* Invoke callback */
    cb(user_opaque, ret);

    /* Signal cancel completion */
    if (finished) {
        *finished = true;
    }
}

static void qed_aio_complete(QEDAIOCB *acb, int ret)
{
    BDRVQEDState *s = acb_to_s(acb);

    trace_qed_aio_complete(s, acb, ret);

    /* Free resources */
    qemu_iovec_destroy(&acb->cur_qiov);
    qed_unref_l2_cache_entry(acb->request.l2_table);

    /* Free the buffer we may have allocated for zero writes */
    if (acb->flags & QED_AIOCB_ZERO) {
        qemu_vfree(acb->qiov->iov[0].iov_base);
        acb->qiov->iov[0].iov_base = NULL;
    }

    /* Arrange for a bh to invoke the completion function */
    acb->bh_ret = ret;
    acb->bh = qemu_bh_new(qed_aio_complete_bh, acb);
    qemu_bh_schedule(acb->bh);

    /* Start next allocating write request waiting behind this one.  Note that
     * requests enqueue themselves when they first hit an unallocated cluster
     * but they wait until the entire request is finished before waking up the
     * next request in the queue.  This ensures that we don't cycle through
     * requests multiple times but rather finish one at a time completely.
     */
    if (acb == QSIMPLEQ_FIRST(&s->allocating_write_reqs)) {
        QSIMPLEQ_REMOVE_HEAD(&s->allocating_write_reqs, next);
        acb = QSIMPLEQ_FIRST(&s->allocating_write_reqs);
        if (acb) {
            qed_aio_next_io(acb, 0);
        } else if (s->header.features & QED_F_NEED_CHECK) {
            qed_start_need_check_timer(s);
        }
    }
}

/**
 * Commit the current L2 table to the cache
 */
static void qed_commit_l2_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    CachedL2Table *l2_table = acb->request.l2_table;
    uint64_t l2_offset = l2_table->offset;

    qed_commit_l2_cache_entry(&s->l2_cache, l2_table);

    /* This is guaranteed to succeed because we just committed the entry to the
     * cache.
     */
    acb->request.l2_table = qed_find_l2_cache_entry(&s->l2_cache, l2_offset);
    assert(acb->request.l2_table != NULL);

    qed_aio_next_io(opaque, ret);
}

/**
 * Update L1 table with new L2 table offset and write it out
 */
static void qed_aio_write_l1_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    int index;

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    index = qed_l1_index(s, acb->cur_pos);
    s->l1_table->offsets[index] = acb->request.l2_table->offset;

    qed_write_l1_table(s, index, 1, qed_commit_l2_update, acb);
}

/**
 * Update L2 table with new cluster offsets and write them out
 */
static void qed_aio_write_l2_update(QEDAIOCB *acb, int ret, uint64_t offset)
{
    BDRVQEDState *s = acb_to_s(acb);
    bool need_alloc = acb->find_cluster_ret == QED_CLUSTER_L1;
    int index;

    if (ret) {
        goto err;
    }

    if (need_alloc) {
        qed_unref_l2_cache_entry(acb->request.l2_table);
        acb->request.l2_table = qed_new_l2_table(s);
    }

    index = qed_l2_index(s, acb->cur_pos);
    qed_update_l2_table(s, acb->request.l2_table->table, index, acb->cur_nclusters,
                        offset);

    if (need_alloc) {
        /* Write out the whole new L2 table */
        qed_write_l2_table(s, &acb->request, 0, s->table_nelems, true,
                           qed_aio_write_l1_update, acb);
    } else {
        /* Write out only the updated part of the L2 table */
        qed_write_l2_table(s, &acb->request, index, acb->cur_nclusters, false,
                           qed_aio_next_io, acb);
    }
    return;

err:
    qed_aio_complete(acb, ret);
}

static void qed_aio_write_l2_update_cb(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    qed_aio_write_l2_update(acb, ret, acb->cur_cluster);
}

/**
 * Flush new data clusters before updating the L2 table
 *
 * This flush is necessary when a backing file is in use.  A crash during an
 * allocating write could result in empty clusters in the image.  If the write
 * only touched a subregion of the cluster, then backing image sectors have
 * been lost in the untouched region.  The solution is to flush after writing a
 * new data cluster and before updating the L2 table.
 */
static void qed_aio_write_flush_before_l2_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);

    if (!bdrv_aio_flush(s->bs->file, qed_aio_write_l2_update_cb, opaque)) {
        qed_aio_complete(acb, -EIO);
    }
}

/**
 * Write data to the image file
 */
static void qed_aio_write_main(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t offset = acb->cur_cluster +
                      qed_offset_into_cluster(s, acb->cur_pos);
    BlockDriverCompletionFunc *next_fn;

    trace_qed_aio_write_main(s, acb, ret, offset, acb->cur_qiov.size);

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    if (acb->find_cluster_ret == QED_CLUSTER_FOUND) {
        next_fn = qed_aio_next_io;
    } else {
        if (s->bs->backing_hd) {
            next_fn = qed_aio_write_flush_before_l2_update;
        } else {
            next_fn = qed_aio_write_l2_update_cb;
        }
    }

    BLKDBG_EVENT(s->bs->file, BLKDBG_WRITE_AIO);
    bdrv_aio_writev(s->bs->file, offset / BDRV_SECTOR_SIZE,
                    &acb->cur_qiov, acb->cur_qiov.size / BDRV_SECTOR_SIZE,
                    next_fn, acb);
}

/**
 * Populate back untouched region of new data cluster
 */
static void qed_aio_write_postfill(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t start = acb->cur_pos + acb->cur_qiov.size;
    uint64_t len =
        qed_start_of_cluster(s, start + s->header.cluster_size - 1) - start;
    uint64_t offset = acb->cur_cluster +
                      qed_offset_into_cluster(s, acb->cur_pos) +
                      acb->cur_qiov.size;

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    trace_qed_aio_write_postfill(s, acb, start, len, offset);
    qed_copy_from_backing_file(s, start, len, offset,
                               qed_aio_write_main, acb);
}

/**
 * Populate front untouched region of new data cluster
 */
static void qed_aio_write_prefill(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t start = qed_start_of_cluster(s, acb->cur_pos);
    uint64_t len = qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_write_prefill(s, acb, start, len, acb->cur_cluster);
    qed_copy_from_backing_file(s, start, len, acb->cur_cluster,
                               qed_aio_write_postfill, acb);
}

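/* Copy-on-write layout for an allocating write, e.g. a 4 KiB guest write at
 * byte offset 8192 within a fresh 64 KiB cluster:
 *
 *   |<- prefill: 0..8191 ->|<- guest data ->|<- postfill: 12288..65535 ->|
 *
 * qed_aio_write_prefill() copies the region before the write from the
 * backing file, qed_aio_write_postfill() copies the region after it, and
 * qed_aio_write_main() then writes the guest data itself.
 */
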
/**
 * Check if the QED_F_NEED_CHECK bit should be set during allocating write
 */
static bool qed_should_set_need_check(BDRVQEDState *s)
{
    /* The flush before L2 update path ensures consistency */
    if (s->bs->backing_hd) {
        return false;
    }

    return !(s->header.features & QED_F_NEED_CHECK);
}

static void qed_aio_write_zero_cluster(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    qed_aio_write_l2_update(acb, 0, 1);
}

/**
 * Write new data cluster
 *
 * @acb:        Write request
 * @len:        Length in bytes
 *
 * This path is taken when writing to previously unallocated clusters.
 */
static void qed_aio_write_alloc(QEDAIOCB *acb, size_t len)
{
    BDRVQEDState *s = acb_to_s(acb);
    BlockDriverCompletionFunc *cb;

    /* Cancel timer when the first allocating request comes in */
    if (QSIMPLEQ_EMPTY(&s->allocating_write_reqs)) {
        qed_cancel_need_check_timer(s);
    }

    /* Freeze this request if another allocating write is in progress */
    if (acb != QSIMPLEQ_FIRST(&s->allocating_write_reqs)) {
        QSIMPLEQ_INSERT_TAIL(&s->allocating_write_reqs, acb, next);
    }
    if (acb != QSIMPLEQ_FIRST(&s->allocating_write_reqs) ||
        s->allocating_write_reqs_plugged) {
        return; /* wait for existing request to finish */
    }

    acb->cur_nclusters = qed_bytes_to_clusters(s,
            qed_offset_into_cluster(s, acb->cur_pos) + len);
    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    if (acb->flags & QED_AIOCB_ZERO) {
        /* Skip ahead if the clusters are already zero */
        if (acb->find_cluster_ret == QED_CLUSTER_ZERO) {
            qed_aio_next_io(acb, 0);
            return;
        }

        cb = qed_aio_write_zero_cluster;
    } else {
        cb = qed_aio_write_prefill;
        acb->cur_cluster = qed_alloc_clusters(s, acb->cur_nclusters);
    }

    if (qed_should_set_need_check(s)) {
        s->header.features |= QED_F_NEED_CHECK;
        qed_write_header(s, cb, acb);
    } else {
        cb(acb, 0);
    }
}

/**
 * Write data cluster in place
 *
 * @acb:        Write request
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * This path is taken when writing to already allocated clusters.
 */
static void qed_aio_write_inplace(QEDAIOCB *acb, uint64_t offset, size_t len)
{
    /* Allocate buffer for zero writes */
    if (acb->flags & QED_AIOCB_ZERO) {
        struct iovec *iov = acb->qiov->iov;

        if (!iov->iov_base) {
            iov->iov_base = qemu_blockalign(acb->common.bs, iov->iov_len);
            memset(iov->iov_base, 0, iov->iov_len);
        }
    }

    /* Calculate the I/O vector */
    acb->cur_cluster = offset;
    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Do the actual write */
    qed_aio_write_main(acb, 0);
}

/**
 * Write data cluster
 *
 * @opaque:     Write request
 * @ret:        QED_CLUSTER_FOUND, QED_CLUSTER_L2, QED_CLUSTER_L1,
 *              or -errno
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * Callback from qed_find_cluster().
 */
static void qed_aio_write_data(void *opaque, int ret,
                               uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;

    trace_qed_aio_write_data(acb_to_s(acb), acb, ret, offset, len);

    acb->find_cluster_ret = ret;

    switch (ret) {
    case QED_CLUSTER_FOUND:
        qed_aio_write_inplace(acb, offset, len);
        break;

    case QED_CLUSTER_L2:
    case QED_CLUSTER_L1:
    case QED_CLUSTER_ZERO:
        qed_aio_write_alloc(acb, len);
        break;

    default:
        qed_aio_complete(acb, ret);
        break;
    }
}

|
1247 |
/**
|
1248 |
* Read data cluster
|
1249 |
*
|
1250 |
* @opaque: Read request
|
1251 |
* @ret: QED_CLUSTER_FOUND, QED_CLUSTER_L2, QED_CLUSTER_L1,
|
1252 |
* or -errno
|
1253 |
* @offset: Cluster offset in bytes
|
1254 |
* @len: Length in bytes
|
1255 |
*
|
1256 |
* Callback from qed_find_cluster().
|
1257 |
*/
|
1258 |
static void qed_aio_read_data(void *opaque, int ret, |
1259 |
uint64_t offset, size_t len) |
1260 |
{ |
1261 |
QEDAIOCB *acb = opaque; |
1262 |
BDRVQEDState *s = acb_to_s(acb); |
1263 |
BlockDriverState *bs = acb->common.bs; |
1264 |
|
1265 |
/* Adjust offset into cluster */
|
1266 |
offset += qed_offset_into_cluster(s, acb->cur_pos); |
1267 |
|
1268 |
trace_qed_aio_read_data(s, acb, ret, offset, len); |
1269 |
|
1270 |
if (ret < 0) { |
1271 |
goto err;
|
1272 |
} |
1273 |
|
1274 |
qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len); |
1275 |
|
1276 |
/* Handle zero cluster and backing file reads */
|
1277 |
if (ret == QED_CLUSTER_ZERO) {
|
1278 |
qemu_iovec_memset(&acb->cur_qiov, 0, 0, acb->cur_qiov.size); |
1279 |
qed_aio_next_io(acb, 0);
|
1280 |
return;
|
1281 |
} else if (ret != QED_CLUSTER_FOUND) { |
1282 |
qed_read_backing_file(s, acb->cur_pos, &acb->cur_qiov, |
1283 |
qed_aio_next_io, acb); |
1284 |
return;
|
1285 |
} |
1286 |
|
1287 |
BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO); |
1288 |
bdrv_aio_readv(bs->file, offset / BDRV_SECTOR_SIZE, |
1289 |
&acb->cur_qiov, acb->cur_qiov.size / BDRV_SECTOR_SIZE, |
1290 |
qed_aio_next_io, acb); |
1291 |
return;
|
1292 |
|
1293 |
err:
|
1294 |
qed_aio_complete(acb, ret); |
1295 |
} |
1296 |
|
1297 |
/**
|
1298 |
* Begin next I/O or complete the request
|
1299 |
*/
|
1300 |
static void qed_aio_next_io(void *opaque, int ret) |
1301 |
{ |
1302 |
QEDAIOCB *acb = opaque; |
1303 |
BDRVQEDState *s = acb_to_s(acb); |
1304 |
QEDFindClusterFunc *io_fn = (acb->flags & QED_AIOCB_WRITE) ? |
1305 |
qed_aio_write_data : qed_aio_read_data; |
1306 |
|
1307 |
trace_qed_aio_next_io(s, acb, ret, acb->cur_pos + acb->cur_qiov.size); |
1308 |
|
1309 |
/* Handle I/O error */
|
1310 |
if (ret) {
|
1311 |
qed_aio_complete(acb, ret); |
1312 |
return;
|
1313 |
} |
1314 |
|
1315 |
acb->qiov_offset += acb->cur_qiov.size; |
1316 |
acb->cur_pos += acb->cur_qiov.size; |
1317 |
qemu_iovec_reset(&acb->cur_qiov); |
1318 |
|
1319 |
/* Complete request */
|
1320 |
if (acb->cur_pos >= acb->end_pos) {
|
1321 |
qed_aio_complete(acb, 0);
|
1322 |
return;
|
1323 |
} |
1324 |
|
1325 |
/* Find next cluster and start I/O */
|
1326 |
qed_find_cluster(s, &acb->request, |
1327 |
acb->cur_pos, acb->end_pos - acb->cur_pos, |
1328 |
io_fn, acb); |
1329 |
} |
1330 |
|
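/* Request state machine driven by qed_aio_next_io():
 *
 *   qed_aio_setup() -> qed_aio_next_io() -> qed_find_cluster()
 *        ^                                        |
 *        |                                        v
 *        +------- qed_aio_read_data() / qed_aio_write_data()
 *
 * Each iteration maps one run of clusters, performs the read or write for
 * that run, and loops back via qed_aio_next_io() until cur_pos reaches
 * end_pos, at which point qed_aio_complete() fires the caller's callback.
 */
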
static BlockDriverAIOCB *qed_aio_setup(BlockDriverState *bs,
                                       int64_t sector_num,
                                       QEMUIOVector *qiov, int nb_sectors,
                                       BlockDriverCompletionFunc *cb,
                                       void *opaque, int flags)
{
    QEDAIOCB *acb = qemu_aio_get(&qed_aiocb_info, bs, cb, opaque);

    trace_qed_aio_setup(bs->opaque, acb, sector_num, nb_sectors,
                        opaque, flags);

    acb->flags = flags;
    acb->finished = NULL;
    acb->qiov = qiov;
    acb->qiov_offset = 0;
    acb->cur_pos = (uint64_t)sector_num * BDRV_SECTOR_SIZE;
    acb->end_pos = acb->cur_pos + nb_sectors * BDRV_SECTOR_SIZE;
    acb->request.l2_table = NULL;
    qemu_iovec_init(&acb->cur_qiov, qiov->niov);

    /* Start request */
    qed_aio_next_io(acb, 0);
    return &acb->common;
}

static BlockDriverAIOCB *bdrv_qed_aio_readv(BlockDriverState *bs,
                                            int64_t sector_num,
                                            QEMUIOVector *qiov, int nb_sectors,
                                            BlockDriverCompletionFunc *cb,
                                            void *opaque)
{
    return qed_aio_setup(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
}

static BlockDriverAIOCB *bdrv_qed_aio_writev(BlockDriverState *bs,
                                             int64_t sector_num,
                                             QEMUIOVector *qiov, int nb_sectors,
                                             BlockDriverCompletionFunc *cb,
                                             void *opaque)
{
    return qed_aio_setup(bs, sector_num, qiov, nb_sectors, cb,
                         opaque, QED_AIOCB_WRITE);
}

typedef struct {
    Coroutine *co;
    int ret;
    bool done;
} QEDWriteZeroesCB;

static void coroutine_fn qed_co_write_zeroes_cb(void *opaque, int ret)
{
    QEDWriteZeroesCB *cb = opaque;

    cb->done = true;
    cb->ret = ret;
    if (cb->co) {
        qemu_coroutine_enter(cb->co, NULL);
    }
}

static int coroutine_fn bdrv_qed_co_write_zeroes(BlockDriverState *bs,
                                                 int64_t sector_num,
                                                 int nb_sectors)
{
    BlockDriverAIOCB *blockacb;
    BDRVQEDState *s = bs->opaque;
    QEDWriteZeroesCB cb = { .done = false };
    QEMUIOVector qiov;
    struct iovec iov;

    /* Refuse if there are untouched backing file sectors */
    if (bs->backing_hd) {
        if (qed_offset_into_cluster(s, sector_num * BDRV_SECTOR_SIZE) != 0) {
            return -ENOTSUP;
        }
        if (qed_offset_into_cluster(s, nb_sectors * BDRV_SECTOR_SIZE) != 0) {
            return -ENOTSUP;
        }
    }

    /* Zero writes start without an I/O buffer.  If a buffer becomes necessary
     * then it will be allocated during request processing.
     */
    iov.iov_base = NULL;
    iov.iov_len = nb_sectors * BDRV_SECTOR_SIZE;

    qemu_iovec_init_external(&qiov, &iov, 1);
    blockacb = qed_aio_setup(bs, sector_num, &qiov, nb_sectors,
                             qed_co_write_zeroes_cb, &cb,
                             QED_AIOCB_WRITE | QED_AIOCB_ZERO);
    if (!blockacb) {
        return -EIO;
    }
    if (!cb.done) {
        cb.co = qemu_coroutine_self();
        qemu_coroutine_yield();
    }
    assert(cb.done);
    return cb.ret;
}

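/* Note: with a backing file present, zero writes must cover whole clusters;
 * e.g. with the default 64 KiB clusters both sector_num and nb_sectors must
 * be multiples of 128 (64 KiB / 512-byte sectors).  Otherwise the checks
 * above return -ENOTSUP, and the generic block layer is expected to fall
 * back to writing explicit zero buffers.
 */
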
static int bdrv_qed_truncate(BlockDriverState *bs, int64_t offset)
{
    BDRVQEDState *s = bs->opaque;
    uint64_t old_image_size;
    int ret;

    if (!qed_is_image_size_valid(offset, s->header.cluster_size,
                                 s->header.table_size)) {
        return -EINVAL;
    }

    /* Shrinking is currently not supported */
    if ((uint64_t)offset < s->header.image_size) {
        return -ENOTSUP;
    }

    old_image_size = s->header.image_size;
    s->header.image_size = offset;
    ret = qed_write_header_sync(s);
    if (ret < 0) {
        s->header.image_size = old_image_size;
    }
    return ret;
}

static int64_t bdrv_qed_getlength(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;
    return s->header.image_size;
}

static int bdrv_qed_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
{
    BDRVQEDState *s = bs->opaque;

    memset(bdi, 0, sizeof(*bdi));
    bdi->cluster_size = s->header.cluster_size;
    bdi->is_dirty = s->header.features & QED_F_NEED_CHECK;
    return 0;
}

static int bdrv_qed_change_backing_file(BlockDriverState *bs,
                                        const char *backing_file,
                                        const char *backing_fmt)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader new_header, le_header;
    void *buffer;
    size_t buffer_len, backing_file_len;
    int ret;

    /* Refuse to set backing filename if unknown compat feature bits are
     * active.  If the image uses an unknown compat feature then we may not
     * know the layout of data following the header structure and cannot safely
     * add a new string.
     */
    if (backing_file && (s->header.compat_features &
                         ~QED_COMPAT_FEATURE_MASK)) {
        return -ENOTSUP;
    }

    memcpy(&new_header, &s->header, sizeof(new_header));

    new_header.features &= ~(QED_F_BACKING_FILE |
                             QED_F_BACKING_FORMAT_NO_PROBE);

    /* Adjust feature flags */
    if (backing_file) {
        new_header.features |= QED_F_BACKING_FILE;

        if (qed_fmt_is_raw(backing_fmt)) {
            new_header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
        }
    }

    /* Calculate new header size */
    backing_file_len = 0;

    if (backing_file) {
        backing_file_len = strlen(backing_file);
    }

    buffer_len = sizeof(new_header);
    new_header.backing_filename_offset = buffer_len;
    new_header.backing_filename_size = backing_file_len;
    buffer_len += backing_file_len;

    /* Make sure we can rewrite header without failing */
    if (buffer_len > new_header.header_size * new_header.cluster_size) {
        return -ENOSPC;
    }

    /* Prepare new header */
    buffer = g_malloc(buffer_len);

    qed_header_cpu_to_le(&new_header, &le_header);
    memcpy(buffer, &le_header, sizeof(le_header));
    buffer_len = sizeof(le_header);

    if (backing_file) {
        memcpy(buffer + buffer_len, backing_file, backing_file_len);
        buffer_len += backing_file_len;
    }

    /* Write new header */
    ret = bdrv_pwrite_sync(bs->file, 0, buffer, buffer_len);
    g_free(buffer);
    if (ret == 0) {
        memcpy(&s->header, &new_header, sizeof(new_header));
    }
    return ret;
}

static void bdrv_qed_invalidate_cache(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    bdrv_qed_close(bs);
    memset(s, 0, sizeof(BDRVQEDState));
    bdrv_qed_open(bs, NULL, bs->open_flags, NULL);
}

static int bdrv_qed_check(BlockDriverState *bs, BdrvCheckResult *result,
                          BdrvCheckMode fix)
{
    BDRVQEDState *s = bs->opaque;

    return qed_check(s, result, !!fix);
}

static QEMUOptionParameter qed_create_options[] = {
    {
        .name = BLOCK_OPT_SIZE,
        .type = OPT_SIZE,
        .help = "Virtual disk size (in bytes)"
    }, {
        .name = BLOCK_OPT_BACKING_FILE,
        .type = OPT_STRING,
        .help = "File name of a base image"
    }, {
        .name = BLOCK_OPT_BACKING_FMT,
        .type = OPT_STRING,
        .help = "Image format of the base image"
    }, {
        .name = BLOCK_OPT_CLUSTER_SIZE,
        .type = OPT_SIZE,
        .help = "Cluster size (in bytes)",
        .value = { .n = QED_DEFAULT_CLUSTER_SIZE },
    }, {
        .name = BLOCK_OPT_TABLE_SIZE,
        .type = OPT_SIZE,
        .help = "L1/L2 table size (in clusters)"
    },
    { /* end of list */ }
};

static BlockDriver bdrv_qed = {
    .format_name              = "qed",
    .instance_size            = sizeof(BDRVQEDState),
    .create_options           = qed_create_options,

    .bdrv_probe               = bdrv_qed_probe,
    .bdrv_rebind              = bdrv_qed_rebind,
    .bdrv_open                = bdrv_qed_open,
    .bdrv_close               = bdrv_qed_close,
    .bdrv_reopen_prepare      = bdrv_qed_reopen_prepare,
    .bdrv_create              = bdrv_qed_create,
    .bdrv_has_zero_init       = bdrv_has_zero_init_1,
    .bdrv_co_get_block_status = bdrv_qed_co_get_block_status,
    .bdrv_make_empty          = bdrv_qed_make_empty,
    .bdrv_aio_readv           = bdrv_qed_aio_readv,
    .bdrv_aio_writev          = bdrv_qed_aio_writev,
    .bdrv_co_write_zeroes     = bdrv_qed_co_write_zeroes,
    .bdrv_truncate            = bdrv_qed_truncate,
    .bdrv_getlength           = bdrv_qed_getlength,
    .bdrv_get_info            = bdrv_qed_get_info,
    .bdrv_change_backing_file = bdrv_qed_change_backing_file,
    .bdrv_invalidate_cache    = bdrv_qed_invalidate_cache,
    .bdrv_check               = bdrv_qed_check,
};

static void bdrv_qed_init(void)
{
    bdrv_register(&bdrv_qed);
}

block_init(bdrv_qed_init);