/*
 * QEMU Enhanced Disk Format
 *
 * Copyright IBM, Corp. 2010
 *
 * Authors:
 *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/timer.h"
#include "trace.h"
#include "qed.h"
#include "qapi/qmp/qerror.h"
#include "migration/migration.h"

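/**
 * Cancel an in-flight request by synchronously waiting for it to complete
 */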
static void qed_aio_cancel(BlockDriverAIOCB *blockacb)
{
    QEDAIOCB *acb = (QEDAIOCB *)blockacb;
    bool finished = false;

    /* Wait for the request to finish */
    acb->finished = &finished;
    while (!finished) {
        qemu_aio_wait();
    }
}

static const AIOCBInfo qed_aiocb_info = {
    .aiocb_size         = sizeof(QEDAIOCB),
    .cancel             = qed_aio_cancel,
};

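/**
 * Check for the QED magic number when probing the image format
 *
 * Returns 100 (strong match) if the buffer contains a QED header, 0 otherwise.
 */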
static int bdrv_qed_probe(const uint8_t *buf, int buf_size,
                          const char *filename)
{
    const QEDHeader *header = (const QEDHeader *)buf;

    if (buf_size < sizeof(*header)) {
        return 0;
    }
    if (le32_to_cpu(header->magic) != QED_MAGIC) {
        return 0;
    }
    return 100;
}

/**
 * Check whether an image format is raw
 *
 * @fmt:    Backing file format, may be NULL
 */
static bool qed_fmt_is_raw(const char *fmt)
{
    return fmt && strcmp(fmt, "raw") == 0;
}

static void qed_header_le_to_cpu(const QEDHeader *le, QEDHeader *cpu)
{
    cpu->magic = le32_to_cpu(le->magic);
    cpu->cluster_size = le32_to_cpu(le->cluster_size);
    cpu->table_size = le32_to_cpu(le->table_size);
    cpu->header_size = le32_to_cpu(le->header_size);
    cpu->features = le64_to_cpu(le->features);
    cpu->compat_features = le64_to_cpu(le->compat_features);
    cpu->autoclear_features = le64_to_cpu(le->autoclear_features);
    cpu->l1_table_offset = le64_to_cpu(le->l1_table_offset);
    cpu->image_size = le64_to_cpu(le->image_size);
    cpu->backing_filename_offset = le32_to_cpu(le->backing_filename_offset);
    cpu->backing_filename_size = le32_to_cpu(le->backing_filename_size);
}

static void qed_header_cpu_to_le(const QEDHeader *cpu, QEDHeader *le)
{
    le->magic = cpu_to_le32(cpu->magic);
    le->cluster_size = cpu_to_le32(cpu->cluster_size);
    le->table_size = cpu_to_le32(cpu->table_size);
    le->header_size = cpu_to_le32(cpu->header_size);
    le->features = cpu_to_le64(cpu->features);
    le->compat_features = cpu_to_le64(cpu->compat_features);
    le->autoclear_features = cpu_to_le64(cpu->autoclear_features);
    le->l1_table_offset = cpu_to_le64(cpu->l1_table_offset);
    le->image_size = cpu_to_le64(cpu->image_size);
    le->backing_filename_offset = cpu_to_le32(cpu->backing_filename_offset);
    le->backing_filename_size = cpu_to_le32(cpu->backing_filename_size);
}

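/**
 * Write the header to the image file synchronously
 *
 * Returns 0 on success or a negative errno from bdrv_pwrite() on failure.
 */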
int qed_write_header_sync(BDRVQEDState *s)
{
    QEDHeader le;
    int ret;

    qed_header_cpu_to_le(&s->header, &le);
    ret = bdrv_pwrite(s->bs->file, 0, &le, sizeof(le));
    if (ret != sizeof(le)) {
        return ret;
    }
    return 0;
}

typedef struct {
    GenericCB gencb;
    BDRVQEDState *s;
    struct iovec iov;
    QEMUIOVector qiov;
    int nsectors;
    uint8_t *buf;
} QEDWriteHeaderCB;

static void qed_write_header_cb(void *opaque, int ret)
{
    QEDWriteHeaderCB *write_header_cb = opaque;

    qemu_vfree(write_header_cb->buf);
    gencb_complete(write_header_cb, ret);
}

static void qed_write_header_read_cb(void *opaque, int ret)
{
    QEDWriteHeaderCB *write_header_cb = opaque;
    BDRVQEDState *s = write_header_cb->s;

    if (ret) {
        qed_write_header_cb(write_header_cb, ret);
        return;
    }

    /* Update header */
    qed_header_cpu_to_le(&s->header, (QEDHeader *)write_header_cb->buf);

    bdrv_aio_writev(s->bs->file, 0, &write_header_cb->qiov,
                    write_header_cb->nsectors, qed_write_header_cb,
                    write_header_cb);
}

/**
 * Update header in-place (does not rewrite backing filename or other strings)
 *
 * This function only updates known header fields in-place and does not affect
 * extra data after the QED header.
 */
static void qed_write_header(BDRVQEDState *s, BlockDriverCompletionFunc cb,
                             void *opaque)
{
    /* We must write full sectors for O_DIRECT but cannot necessarily generate
     * the data following the header if an unrecognized compat feature is
     * active.  Therefore, first read the sectors containing the header, update
     * them, and write back.
     */

    int nsectors = (sizeof(QEDHeader) + BDRV_SECTOR_SIZE - 1) /
                   BDRV_SECTOR_SIZE;
    size_t len = nsectors * BDRV_SECTOR_SIZE;
    QEDWriteHeaderCB *write_header_cb = gencb_alloc(sizeof(*write_header_cb),
                                                    cb, opaque);

    write_header_cb->s = s;
    write_header_cb->nsectors = nsectors;
    write_header_cb->buf = qemu_blockalign(s->bs, len);
    write_header_cb->iov.iov_base = write_header_cb->buf;
    write_header_cb->iov.iov_len = len;
    qemu_iovec_init_external(&write_header_cb->qiov, &write_header_cb->iov, 1);

    bdrv_aio_readv(s->bs->file, 0, &write_header_cb->qiov, nsectors,
                   qed_write_header_read_cb, write_header_cb);
}

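/**
 * Compute the maximum image size for given cluster and table sizes
 *
 * Worked example (illustrative, assuming the defaults of 64 KB clusters and
 * table_size 4): table_entries = 4 * 65536 / 8 = 32768, l2_size = 32768 *
 * 65536 = 2 GB, so the maximum image size is 2 GB * 32768 = 64 TB.
 */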
static uint64_t qed_max_image_size(uint32_t cluster_size, uint32_t table_size)
{
    uint64_t table_entries;
    uint64_t l2_size;

    table_entries = (table_size * cluster_size) / sizeof(uint64_t);
    l2_size = table_entries * cluster_size;

    return l2_size * table_entries;
}

static bool qed_is_cluster_size_valid(uint32_t cluster_size)
{
    if (cluster_size < QED_MIN_CLUSTER_SIZE ||
        cluster_size > QED_MAX_CLUSTER_SIZE) {
        return false;
    }
    if (cluster_size & (cluster_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}

static bool qed_is_table_size_valid(uint32_t table_size)
{
    if (table_size < QED_MIN_TABLE_SIZE ||
        table_size > QED_MAX_TABLE_SIZE) {
        return false;
    }
    if (table_size & (table_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}

static bool qed_is_image_size_valid(uint64_t image_size, uint32_t cluster_size,
                                    uint32_t table_size)
{
    if (image_size % BDRV_SECTOR_SIZE != 0) {
        return false; /* not multiple of sector size */
    }
    if (image_size > qed_max_image_size(cluster_size, table_size)) {
        return false; /* image is too large */
    }
    return true;
}

/**
 * Read a string of known length from the image file
 *
 * @file:       Image file
 * @offset:     File offset to start of string, in bytes
 * @n:          String length in bytes
 * @buf:        Destination buffer
 * @buflen:     Destination buffer length in bytes
 * @ret:        0 on success, -errno on failure
 *
 * The string is NUL-terminated.
 */
static int qed_read_string(BlockDriverState *file, uint64_t offset, size_t n,
                           char *buf, size_t buflen)
{
    int ret;
    if (n >= buflen) {
        return -EINVAL;
    }
    ret = bdrv_pread(file, offset, buf, n);
    if (ret < 0) {
        return ret;
    }
    buf[n] = '\0';
    return 0;
}

/**
 * Allocate new clusters
 *
 * @s:          QED state
 * @n:          Number of contiguous clusters to allocate
 * @ret:        Offset of first allocated cluster
 *
 * This function only produces the offset where the new clusters should be
 * written.  It updates BDRVQEDState but does not make any changes to the image
 * file.
 */
static uint64_t qed_alloc_clusters(BDRVQEDState *s, unsigned int n)
{
    uint64_t offset = s->file_size;
    s->file_size += n * s->header.cluster_size;
    return offset;
}

QEDTable *qed_alloc_table(BDRVQEDState *s)
{
    /* Honor O_DIRECT memory alignment requirements */
    return qemu_blockalign(s->bs,
                           s->header.cluster_size * s->header.table_size);
}

/**
 * Allocate a new zeroed L2 table
 */
static CachedL2Table *qed_new_l2_table(BDRVQEDState *s)
{
    CachedL2Table *l2_table = qed_alloc_l2_cache_entry(&s->l2_cache);

    l2_table->table = qed_alloc_table(s);
    l2_table->offset = qed_alloc_clusters(s, s->header.table_size);

    memset(l2_table->table->offsets, 0,
           s->header.cluster_size * s->header.table_size);
    return l2_table;
}

static void qed_aio_next_io(void *opaque, int ret);

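/**
 * Block new allocating write requests
 *
 * Paired with qed_unplug_allocating_write_reqs(), which restarts the first
 * queued request once the need-check flag has been cleared.
 */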
static void qed_plug_allocating_write_reqs(BDRVQEDState *s)
{
    assert(!s->allocating_write_reqs_plugged);

    s->allocating_write_reqs_plugged = true;
}

static void qed_unplug_allocating_write_reqs(BDRVQEDState *s)
{
    QEDAIOCB *acb;

    assert(s->allocating_write_reqs_plugged);

    s->allocating_write_reqs_plugged = false;

    acb = QSIMPLEQ_FIRST(&s->allocating_write_reqs);
    if (acb) {
        qed_aio_next_io(acb, 0);
    }
}

static void qed_finish_clear_need_check(void *opaque, int ret)
{
    /* Do nothing */
}

static void qed_flush_after_clear_need_check(void *opaque, int ret)
{
    BDRVQEDState *s = opaque;

    bdrv_aio_flush(s->bs, qed_finish_clear_need_check, s);

    /* No need to wait until flush completes */
    qed_unplug_allocating_write_reqs(s);
}

static void qed_clear_need_check(void *opaque, int ret)
{
    BDRVQEDState *s = opaque;

    if (ret) {
        qed_unplug_allocating_write_reqs(s);
        return;
    }

    s->header.features &= ~QED_F_NEED_CHECK;
    qed_write_header(s, qed_flush_after_clear_need_check, s);
}

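/**
 * Clear QED_F_NEED_CHECK once the image has been quiescent
 *
 * Sequence: plug allocating writes -> flush -> rewrite header without the
 * flag -> flush -> unplug.  A crash at any point leaves the flag set at
 * worst, which merely forces a consistency check on the next open.
 */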
static void qed_need_check_timer_cb(void *opaque)
{
    BDRVQEDState *s = opaque;

    /* The timer should only fire when allocating writes have drained */
    assert(!QSIMPLEQ_FIRST(&s->allocating_write_reqs));

    trace_qed_need_check_timer_cb(s);

    qed_plug_allocating_write_reqs(s);

    /* Ensure writes are on disk before clearing flag */
    bdrv_aio_flush(s->bs, qed_clear_need_check, s);
}

static void qed_start_need_check_timer(BDRVQEDState *s)
{
    trace_qed_start_need_check_timer(s);

    /* Use QEMU_CLOCK_VIRTUAL so we don't alter the image file while suspended
     * for migration.
     */
    timer_mod(s->need_check_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              get_ticks_per_sec() * QED_NEED_CHECK_TIMEOUT);
}

/* It's okay to call this multiple times or when no timer is started */
static void qed_cancel_need_check_timer(BDRVQEDState *s)
{
    trace_qed_cancel_need_check_timer(s);
    timer_del(s->need_check_timer);
}

static void bdrv_qed_rebind(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;
    s->bs = bs;
}

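/**
 * Open an existing QED image
 *
 * Validates the header, loads the L1 table, clears unknown autoclear feature
 * bits, and runs a consistency check if the image was not closed cleanly.
 */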
static int bdrv_qed_open(BlockDriverState *bs, QDict *options, int flags,
                         Error **errp)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader le_header;
    int64_t file_size;
    int ret;

    s->bs = bs;
    QSIMPLEQ_INIT(&s->allocating_write_reqs);

    ret = bdrv_pread(bs->file, 0, &le_header, sizeof(le_header));
    if (ret < 0) {
        return ret;
    }
    qed_header_le_to_cpu(&le_header, &s->header);

    if (s->header.magic != QED_MAGIC) {
        return -EMEDIUMTYPE;
    }
    if (s->header.features & ~QED_FEATURE_MASK) {
        /* image uses unsupported feature bits */
        char buf[64];
        snprintf(buf, sizeof(buf), "%" PRIx64,
                 s->header.features & ~QED_FEATURE_MASK);
        qerror_report(QERR_UNKNOWN_BLOCK_FORMAT_FEATURE,
                      bs->device_name, "QED", buf);
        return -ENOTSUP;
    }
    if (!qed_is_cluster_size_valid(s->header.cluster_size)) {
        return -EINVAL;
    }

    /* Round down file size to the last cluster */
    file_size = bdrv_getlength(bs->file);
    if (file_size < 0) {
        return file_size;
    }
    s->file_size = qed_start_of_cluster(s, file_size);

    if (!qed_is_table_size_valid(s->header.table_size)) {
        return -EINVAL;
    }
    if (!qed_is_image_size_valid(s->header.image_size,
                                 s->header.cluster_size,
                                 s->header.table_size)) {
        return -EINVAL;
    }
    if (!qed_check_table_offset(s, s->header.l1_table_offset)) {
        return -EINVAL;
    }

    s->table_nelems = (s->header.cluster_size * s->header.table_size) /
                      sizeof(uint64_t);
    s->l2_shift = ffs(s->header.cluster_size) - 1;
    s->l2_mask = s->table_nelems - 1;
    s->l1_shift = s->l2_shift + ffs(s->table_nelems) - 1;
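    /* Example (illustrative, assuming the default 64 KB clusters and
     * table_size 4): table_nelems = 32768, l2_shift = 16, l2_mask = 0x7fff,
     * and l1_shift = 31, so each L1 entry covers 2 GB of the virtual disk.
     */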

    if ((s->header.features & QED_F_BACKING_FILE)) {
        if ((uint64_t)s->header.backing_filename_offset +
            s->header.backing_filename_size >
            s->header.cluster_size * s->header.header_size) {
            return -EINVAL;
        }

        ret = qed_read_string(bs->file, s->header.backing_filename_offset,
                              s->header.backing_filename_size, bs->backing_file,
                              sizeof(bs->backing_file));
        if (ret < 0) {
            return ret;
        }

        if (s->header.features & QED_F_BACKING_FORMAT_NO_PROBE) {
            pstrcpy(bs->backing_format, sizeof(bs->backing_format), "raw");
        }
    }

    /* Reset unknown autoclear feature bits.  This is a backwards
     * compatibility mechanism that allows images to be opened by older
     * programs, which "knock out" unknown feature bits.  When an image is
     * opened by a newer program again it can detect that the autoclear
     * feature is no longer valid.
     */
    if ((s->header.autoclear_features & ~QED_AUTOCLEAR_FEATURE_MASK) != 0 &&
        !bdrv_is_read_only(bs->file) && !(flags & BDRV_O_INCOMING)) {
        s->header.autoclear_features &= QED_AUTOCLEAR_FEATURE_MASK;

        ret = qed_write_header_sync(s);
        if (ret) {
            return ret;
        }

        /* From here on only known autoclear feature bits are valid */
        bdrv_flush(bs->file);
    }

    s->l1_table = qed_alloc_table(s);
    qed_init_l2_cache(&s->l2_cache);

    ret = qed_read_l1_table_sync(s);
    if (ret) {
        goto out;
    }

    /* If image was not closed cleanly, check consistency */
    if (!(flags & BDRV_O_CHECK) && (s->header.features & QED_F_NEED_CHECK)) {
        /* Read-only images cannot be fixed.  There is no risk of corruption
         * since write operations are not possible.  Therefore, allow
         * potentially inconsistent images to be opened read-only.  This can
         * aid data recovery from an otherwise inconsistent image.
         */
        if (!bdrv_is_read_only(bs->file) &&
            !(flags & BDRV_O_INCOMING)) {
            BdrvCheckResult result = {0};

            ret = qed_check(s, &result, true);
            if (ret) {
                goto out;
            }
        }
    }

    s->need_check_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                       qed_need_check_timer_cb, s);

out:
    if (ret) {
        qed_free_l2_cache(&s->l2_cache);
        qemu_vfree(s->l1_table);
    }
    return ret;
}

/* We have nothing to do for QED reopen; the stub just returns success */
static int bdrv_qed_reopen_prepare(BDRVReopenState *state,
                                   BlockReopenQueue *queue, Error **errp)
{
    return 0;
}

static void bdrv_qed_close(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    qed_cancel_need_check_timer(s);
    timer_free(s->need_check_timer);

    /* Ensure writes reach stable storage */
    bdrv_flush(bs->file);

    /* Clean shutdown, no check required on next open */
    if (s->header.features & QED_F_NEED_CHECK) {
        s->header.features &= ~QED_F_NEED_CHECK;
        qed_write_header_sync(s);
    }

    qed_free_l2_cache(&s->l2_cache);
    qemu_vfree(s->l1_table);
}

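/**
 * Create a new QED image file
 *
 * Layout written here: header in cluster 0, backing filename string
 * immediately following the header structure, and the zeroed L1 table
 * starting at cluster 1 (l1_table_offset == cluster_size).
 */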
static int qed_create(const char *filename, uint32_t cluster_size,
                      uint64_t image_size, uint32_t table_size,
                      const char *backing_file, const char *backing_fmt)
{
    QEDHeader header = {
        .magic = QED_MAGIC,
        .cluster_size = cluster_size,
        .table_size = table_size,
        .header_size = 1,
        .features = 0,
        .compat_features = 0,
        .l1_table_offset = cluster_size,
        .image_size = image_size,
    };
    QEDHeader le_header;
    uint8_t *l1_table = NULL;
    size_t l1_size = header.cluster_size * header.table_size;
    Error *local_err = NULL;
    int ret = 0;
    BlockDriverState *bs = NULL;

    ret = bdrv_create_file(filename, NULL);
    if (ret < 0) {
        return ret;
    }

    ret = bdrv_file_open(&bs, filename, NULL, BDRV_O_RDWR | BDRV_O_CACHE_WB,
                         &local_err);
    if (ret < 0) {
        qerror_report_err(local_err);
        error_free(local_err);
        return ret;
    }

    /* File must start empty and grow, check truncate is supported */
    ret = bdrv_truncate(bs, 0);
    if (ret < 0) {
        goto out;
    }

    if (backing_file) {
        header.features |= QED_F_BACKING_FILE;
        header.backing_filename_offset = sizeof(le_header);
        header.backing_filename_size = strlen(backing_file);

        if (qed_fmt_is_raw(backing_fmt)) {
            header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
        }
    }

    qed_header_cpu_to_le(&header, &le_header);
    ret = bdrv_pwrite(bs, 0, &le_header, sizeof(le_header));
    if (ret < 0) {
        goto out;
    }
    ret = bdrv_pwrite(bs, sizeof(le_header), backing_file,
                      header.backing_filename_size);
    if (ret < 0) {
        goto out;
    }

    l1_table = g_malloc0(l1_size);
    ret = bdrv_pwrite(bs, header.l1_table_offset, l1_table, l1_size);
    if (ret < 0) {
        goto out;
    }

    ret = 0; /* success */
out:
    g_free(l1_table);
    bdrv_unref(bs);
    return ret;
}

static int bdrv_qed_create(const char *filename, QEMUOptionParameter *options,
                           Error **errp)
{
    uint64_t image_size = 0;
    uint32_t cluster_size = QED_DEFAULT_CLUSTER_SIZE;
    uint32_t table_size = QED_DEFAULT_TABLE_SIZE;
    const char *backing_file = NULL;
    const char *backing_fmt = NULL;

    while (options && options->name) {
        if (!strcmp(options->name, BLOCK_OPT_SIZE)) {
            image_size = options->value.n;
        } else if (!strcmp(options->name, BLOCK_OPT_BACKING_FILE)) {
            backing_file = options->value.s;
        } else if (!strcmp(options->name, BLOCK_OPT_BACKING_FMT)) {
            backing_fmt = options->value.s;
        } else if (!strcmp(options->name, BLOCK_OPT_CLUSTER_SIZE)) {
            if (options->value.n) {
                cluster_size = options->value.n;
            }
        } else if (!strcmp(options->name, BLOCK_OPT_TABLE_SIZE)) {
            if (options->value.n) {
                table_size = options->value.n;
            }
        }
        options++;
    }

    if (!qed_is_cluster_size_valid(cluster_size)) {
        fprintf(stderr, "QED cluster size must be within range [%u, %u] "
                        "and a power of 2\n",
                QED_MIN_CLUSTER_SIZE, QED_MAX_CLUSTER_SIZE);
        return -EINVAL;
    }
    if (!qed_is_table_size_valid(table_size)) {
        fprintf(stderr, "QED table size must be within range [%u, %u] "
                        "and a power of 2\n",
                QED_MIN_TABLE_SIZE, QED_MAX_TABLE_SIZE);
        return -EINVAL;
    }
    if (!qed_is_image_size_valid(image_size, cluster_size, table_size)) {
        fprintf(stderr, "QED image size must be a non-zero multiple of "
                        "cluster size and less than %" PRIu64 " bytes\n",
                qed_max_image_size(cluster_size, table_size));
        return -EINVAL;
    }

    return qed_create(filename, cluster_size, image_size, table_size,
                      backing_file, backing_fmt);
}

typedef struct {
    BlockDriverState *bs;
    Coroutine *co;
    uint64_t pos;
    int64_t status;
    int *pnum;
} QEDIsAllocatedCB;

static void qed_is_allocated_cb(void *opaque, int ret, uint64_t offset,
                                size_t len)
{
    QEDIsAllocatedCB *cb = opaque;
    BDRVQEDState *s = cb->bs->opaque;
    *cb->pnum = len / BDRV_SECTOR_SIZE;
    switch (ret) {
    case QED_CLUSTER_FOUND:
        offset |= qed_offset_into_cluster(s, cb->pos);
        cb->status = BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID | offset;
        break;
    case QED_CLUSTER_ZERO:
        cb->status = BDRV_BLOCK_ZERO;
        break;
    case QED_CLUSTER_L2:
    case QED_CLUSTER_L1:
        cb->status = 0;
        break;
    default:
        assert(ret < 0);
        cb->status = ret;
        break;
    }

    if (cb->co) {
        qemu_coroutine_enter(cb->co, NULL);
    }
}

static int64_t coroutine_fn bdrv_qed_co_get_block_status(BlockDriverState *bs,
                                                 int64_t sector_num,
                                                 int nb_sectors, int *pnum)
{
    BDRVQEDState *s = bs->opaque;
    size_t len = (size_t)nb_sectors * BDRV_SECTOR_SIZE;
    QEDIsAllocatedCB cb = {
        .bs = bs,
        .pos = (uint64_t)sector_num * BDRV_SECTOR_SIZE,
        .status = BDRV_BLOCK_OFFSET_MASK,
        .pnum = pnum,
    };
    QEDRequest request = { .l2_table = NULL };

    qed_find_cluster(s, &request, cb.pos, len, qed_is_allocated_cb, &cb);

    /* Now sleep if the callback wasn't invoked immediately */
    while (cb.status == BDRV_BLOCK_OFFSET_MASK) {
        cb.co = qemu_coroutine_self();
        qemu_coroutine_yield();
    }

    qed_unref_l2_cache_entry(request.l2_table);

    return cb.status;
}

static int bdrv_qed_make_empty(BlockDriverState *bs)
{
    return -ENOTSUP;
}

static BDRVQEDState *acb_to_s(QEDAIOCB *acb)
{
    return acb->common.bs->opaque;
}

/**
 * Read from the backing file or zero-fill if no backing file
 *
 * @s:          QED state
 * @pos:        Byte position in device
 * @qiov:       Destination I/O vector
 * @cb:         Completion function
 * @opaque:     User data for completion function
 *
 * This function reads qiov->size bytes starting at pos from the backing file.
 * If there is no backing file then zeroes are read.
 */
static void qed_read_backing_file(BDRVQEDState *s, uint64_t pos,
                                  QEMUIOVector *qiov,
                                  BlockDriverCompletionFunc *cb, void *opaque)
{
    uint64_t backing_length = 0;
    size_t size;

    /* If there is a backing file, get its length.  Treat the absence of a
     * backing file like a zero length backing file.
     */
    if (s->bs->backing_hd) {
        int64_t l = bdrv_getlength(s->bs->backing_hd);
        if (l < 0) {
            cb(opaque, l);
            return;
        }
        backing_length = l;
    }

    /* Zero all sectors if reading beyond the end of the backing file */
    if (pos >= backing_length ||
        pos + qiov->size > backing_length) {
        qemu_iovec_memset(qiov, 0, 0, qiov->size);
    }

    /* Complete now if there are no backing file sectors to read */
    if (pos >= backing_length) {
        cb(opaque, 0);
        return;
    }

    /* If the read straddles the end of the backing file, shorten it */
    size = MIN((uint64_t)backing_length - pos, qiov->size);

    BLKDBG_EVENT(s->bs->file, BLKDBG_READ_BACKING_AIO);
    bdrv_aio_readv(s->bs->backing_hd, pos / BDRV_SECTOR_SIZE,
                   qiov, size / BDRV_SECTOR_SIZE, cb, opaque);
}

typedef struct {
    GenericCB gencb;
    BDRVQEDState *s;
    QEMUIOVector qiov;
    struct iovec iov;
    uint64_t offset;
} CopyFromBackingFileCB;

static void qed_copy_from_backing_file_cb(void *opaque, int ret)
{
    CopyFromBackingFileCB *copy_cb = opaque;
    qemu_vfree(copy_cb->iov.iov_base);
    gencb_complete(&copy_cb->gencb, ret);
}

static void qed_copy_from_backing_file_write(void *opaque, int ret)
{
    CopyFromBackingFileCB *copy_cb = opaque;
    BDRVQEDState *s = copy_cb->s;

    if (ret) {
        qed_copy_from_backing_file_cb(copy_cb, ret);
        return;
    }

    BLKDBG_EVENT(s->bs->file, BLKDBG_COW_WRITE);
    bdrv_aio_writev(s->bs->file, copy_cb->offset / BDRV_SECTOR_SIZE,
                    &copy_cb->qiov, copy_cb->qiov.size / BDRV_SECTOR_SIZE,
                    qed_copy_from_backing_file_cb, copy_cb);
}

/**
 * Copy data from backing file into the image
 *
 * @s:          QED state
 * @pos:        Byte position in device
 * @len:        Number of bytes
 * @offset:     Byte offset in image file
 * @cb:         Completion function
 * @opaque:     User data for completion function
 */
static void qed_copy_from_backing_file(BDRVQEDState *s, uint64_t pos,
                                       uint64_t len, uint64_t offset,
                                       BlockDriverCompletionFunc *cb,
                                       void *opaque)
{
    CopyFromBackingFileCB *copy_cb;

    /* Skip copy entirely if there is no work to do */
    if (len == 0) {
        cb(opaque, 0);
        return;
    }

    copy_cb = gencb_alloc(sizeof(*copy_cb), cb, opaque);
    copy_cb->s = s;
    copy_cb->offset = offset;
    copy_cb->iov.iov_base = qemu_blockalign(s->bs, len);
    copy_cb->iov.iov_len = len;
    qemu_iovec_init_external(&copy_cb->qiov, &copy_cb->iov, 1);

    qed_read_backing_file(s, pos, &copy_cb->qiov,
                          qed_copy_from_backing_file_write, copy_cb);
}

/**
 * Link one or more contiguous clusters into a table
 *
 * @s:              QED state
 * @table:          L2 table
 * @index:          First cluster index
 * @n:              Number of contiguous clusters
 * @cluster:        First cluster offset
 *
 * The cluster offset may be an allocated byte offset in the image file, the
 * zero cluster marker, or the unallocated cluster marker.
 */
static void qed_update_l2_table(BDRVQEDState *s, QEDTable *table, int index,
                                unsigned int n, uint64_t cluster)
{
    int i;
    for (i = index; i < index + n; i++) {
        table->offsets[i] = cluster;
        if (!qed_offset_is_unalloc_cluster(cluster) &&
            !qed_offset_is_zero_cluster(cluster)) {
            cluster += s->header.cluster_size;
        }
    }
}

static void qed_aio_complete_bh(void *opaque)
{
    QEDAIOCB *acb = opaque;
    BlockDriverCompletionFunc *cb = acb->common.cb;
    void *user_opaque = acb->common.opaque;
    int ret = acb->bh_ret;
    bool *finished = acb->finished;

    qemu_bh_delete(acb->bh);
    qemu_aio_release(acb);

    /* Invoke callback */
    cb(user_opaque, ret);

    /* Signal cancel completion */
    if (finished) {
        *finished = true;
    }
}

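/**
 * Complete a request and arrange for its callback to run from a bottom half
 */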
static void qed_aio_complete(QEDAIOCB *acb, int ret)
{
    BDRVQEDState *s = acb_to_s(acb);

    trace_qed_aio_complete(s, acb, ret);

    /* Free resources */
    qemu_iovec_destroy(&acb->cur_qiov);
    qed_unref_l2_cache_entry(acb->request.l2_table);

    /* Free the buffer we may have allocated for zero writes */
    if (acb->flags & QED_AIOCB_ZERO) {
        qemu_vfree(acb->qiov->iov[0].iov_base);
        acb->qiov->iov[0].iov_base = NULL;
    }

    /* Arrange for a bh to invoke the completion function */
    acb->bh_ret = ret;
    acb->bh = qemu_bh_new(qed_aio_complete_bh, acb);
    qemu_bh_schedule(acb->bh);

    /* Start next allocating write request waiting behind this one.  Note that
     * requests enqueue themselves when they first hit an unallocated cluster
     * but they wait until the entire request is finished before waking up the
     * next request in the queue.  This ensures that we don't cycle through
     * requests multiple times but rather finish one at a time completely.
     */
    if (acb == QSIMPLEQ_FIRST(&s->allocating_write_reqs)) {
        QSIMPLEQ_REMOVE_HEAD(&s->allocating_write_reqs, next);
        acb = QSIMPLEQ_FIRST(&s->allocating_write_reqs);
        if (acb) {
            qed_aio_next_io(acb, 0);
        } else if (s->header.features & QED_F_NEED_CHECK) {
            qed_start_need_check_timer(s);
        }
    }
}

/**
 * Commit the current L2 table to the cache
 */
static void qed_commit_l2_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    CachedL2Table *l2_table = acb->request.l2_table;
    uint64_t l2_offset = l2_table->offset;

    qed_commit_l2_cache_entry(&s->l2_cache, l2_table);

    /* This is guaranteed to succeed because we just committed the entry to the
     * cache.
     */
    acb->request.l2_table = qed_find_l2_cache_entry(&s->l2_cache, l2_offset);
    assert(acb->request.l2_table != NULL);

    qed_aio_next_io(opaque, ret);
}

/**
 * Update L1 table with new L2 table offset and write it out
 */
static void qed_aio_write_l1_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    int index;

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    index = qed_l1_index(s, acb->cur_pos);
    s->l1_table->offsets[index] = acb->request.l2_table->offset;

    qed_write_l1_table(s, index, 1, qed_commit_l2_update, acb);
}

/**
 * Update L2 table with new cluster offsets and write them out
 */
static void qed_aio_write_l2_update(QEDAIOCB *acb, int ret, uint64_t offset)
{
    BDRVQEDState *s = acb_to_s(acb);
    bool need_alloc = acb->find_cluster_ret == QED_CLUSTER_L1;
    int index;

    if (ret) {
        goto err;
    }

    if (need_alloc) {
        qed_unref_l2_cache_entry(acb->request.l2_table);
        acb->request.l2_table = qed_new_l2_table(s);
    }

    index = qed_l2_index(s, acb->cur_pos);
    qed_update_l2_table(s, acb->request.l2_table->table, index,
                        acb->cur_nclusters, offset);

    if (need_alloc) {
        /* Write out the whole new L2 table */
        qed_write_l2_table(s, &acb->request, 0, s->table_nelems, true,
                           qed_aio_write_l1_update, acb);
    } else {
        /* Write out only the updated part of the L2 table */
        qed_write_l2_table(s, &acb->request, index, acb->cur_nclusters, false,
                           qed_aio_next_io, acb);
    }
    return;

err:
    qed_aio_complete(acb, ret);
}

static void qed_aio_write_l2_update_cb(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    qed_aio_write_l2_update(acb, ret, acb->cur_cluster);
}

/**
 * Flush new data clusters before updating the L2 table
 *
 * This flush is necessary when a backing file is in use.  A crash during an
 * allocating write could result in empty clusters in the image.  If the write
 * only touched a subregion of the cluster, then backing image sectors have
 * been lost in the untouched region.  The solution is to flush after writing a
 * new data cluster and before updating the L2 table.
 */
static void qed_aio_write_flush_before_l2_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);

    if (!bdrv_aio_flush(s->bs->file, qed_aio_write_l2_update_cb, opaque)) {
        qed_aio_complete(acb, -EIO);
    }
}

/**
 * Write data to the image file
 */
static void qed_aio_write_main(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t offset = acb->cur_cluster +
                      qed_offset_into_cluster(s, acb->cur_pos);
    BlockDriverCompletionFunc *next_fn;

    trace_qed_aio_write_main(s, acb, ret, offset, acb->cur_qiov.size);

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    if (acb->find_cluster_ret == QED_CLUSTER_FOUND) {
        next_fn = qed_aio_next_io;
    } else {
        if (s->bs->backing_hd) {
            next_fn = qed_aio_write_flush_before_l2_update;
        } else {
            next_fn = qed_aio_write_l2_update_cb;
        }
    }

    BLKDBG_EVENT(s->bs->file, BLKDBG_WRITE_AIO);
    bdrv_aio_writev(s->bs->file, offset / BDRV_SECTOR_SIZE,
                    &acb->cur_qiov, acb->cur_qiov.size / BDRV_SECTOR_SIZE,
                    next_fn, acb);
}

/**
 * Populate the untouched region at the back of a new data cluster
 */
static void qed_aio_write_postfill(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t start = acb->cur_pos + acb->cur_qiov.size;
    uint64_t len =
        qed_start_of_cluster(s, start + s->header.cluster_size - 1) - start;
    uint64_t offset = acb->cur_cluster +
                      qed_offset_into_cluster(s, acb->cur_pos) +
                      acb->cur_qiov.size;

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    trace_qed_aio_write_postfill(s, acb, start, len, offset);
    qed_copy_from_backing_file(s, start, len, offset,
                               qed_aio_write_main, acb);
}

/**
 * Populate the untouched region at the front of a new data cluster
 */
static void qed_aio_write_prefill(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t start = qed_start_of_cluster(s, acb->cur_pos);
    uint64_t len = qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_write_prefill(s, acb, start, len, acb->cur_cluster);
    qed_copy_from_backing_file(s, start, len, acb->cur_cluster,
                               qed_aio_write_postfill, acb);
}

/**
 * Check if the QED_F_NEED_CHECK bit should be set during allocating write
 */
static bool qed_should_set_need_check(BDRVQEDState *s)
{
    /* The flush before L2 update path ensures consistency */
    if (s->bs->backing_hd) {
        return false;
    }

    return !(s->header.features & QED_F_NEED_CHECK);
}

static void qed_aio_write_zero_cluster(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

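    /* Offset 1 is the special zero cluster marker in QED L2 tables */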
    qed_aio_write_l2_update(acb, 0, 1);
}

/**
 * Write new data cluster
 *
 * @acb:        Write request
 * @len:        Length in bytes
 *
 * This path is taken when writing to previously unallocated clusters.
 */
static void qed_aio_write_alloc(QEDAIOCB *acb, size_t len)
{
    BDRVQEDState *s = acb_to_s(acb);
    BlockDriverCompletionFunc *cb;

    /* Cancel timer when the first allocating request comes in */
    if (QSIMPLEQ_EMPTY(&s->allocating_write_reqs)) {
        qed_cancel_need_check_timer(s);
    }

    /* Freeze this request if another allocating write is in progress */
    if (acb != QSIMPLEQ_FIRST(&s->allocating_write_reqs)) {
        QSIMPLEQ_INSERT_TAIL(&s->allocating_write_reqs, acb, next);
    }
    if (acb != QSIMPLEQ_FIRST(&s->allocating_write_reqs) ||
        s->allocating_write_reqs_plugged) {
        return; /* wait for existing request to finish */
    }

    acb->cur_nclusters = qed_bytes_to_clusters(s,
            qed_offset_into_cluster(s, acb->cur_pos) + len);
    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    if (acb->flags & QED_AIOCB_ZERO) {
        /* Skip ahead if the clusters are already zero */
        if (acb->find_cluster_ret == QED_CLUSTER_ZERO) {
            qed_aio_next_io(acb, 0);
            return;
        }

        cb = qed_aio_write_zero_cluster;
    } else {
        cb = qed_aio_write_prefill;
        acb->cur_cluster = qed_alloc_clusters(s, acb->cur_nclusters);
    }

    if (qed_should_set_need_check(s)) {
        s->header.features |= QED_F_NEED_CHECK;
        qed_write_header(s, cb, acb);
    } else {
        cb(acb, 0);
    }
}

/**
 * Write data cluster in place
 *
 * @acb:        Write request
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * This path is taken when writing to already allocated clusters.
 */
static void qed_aio_write_inplace(QEDAIOCB *acb, uint64_t offset, size_t len)
{
    /* Allocate buffer for zero writes */
    if (acb->flags & QED_AIOCB_ZERO) {
        struct iovec *iov = acb->qiov->iov;

        if (!iov->iov_base) {
            iov->iov_base = qemu_blockalign(acb->common.bs, iov->iov_len);
            memset(iov->iov_base, 0, iov->iov_len);
        }
    }

    /* Calculate the I/O vector */
    acb->cur_cluster = offset;
    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Do the actual write */
    qed_aio_write_main(acb, 0);
}

/**
 * Write data cluster
 *
 * @opaque:     Write request
 * @ret:        QED_CLUSTER_FOUND, QED_CLUSTER_L2, QED_CLUSTER_L1,
 *              or -errno
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * Callback from qed_find_cluster().
 */
static void qed_aio_write_data(void *opaque, int ret,
                               uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;

    trace_qed_aio_write_data(acb_to_s(acb), acb, ret, offset, len);

    acb->find_cluster_ret = ret;

    switch (ret) {
    case QED_CLUSTER_FOUND:
        qed_aio_write_inplace(acb, offset, len);
        break;

    case QED_CLUSTER_L2:
    case QED_CLUSTER_L1:
    case QED_CLUSTER_ZERO:
        qed_aio_write_alloc(acb, len);
        break;

    default:
        qed_aio_complete(acb, ret);
        break;
    }
}

/**
 * Read data cluster
 *
 * @opaque:     Read request
 * @ret:        QED_CLUSTER_FOUND, QED_CLUSTER_L2, QED_CLUSTER_L1,
 *              or -errno
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * Callback from qed_find_cluster().
 */
static void qed_aio_read_data(void *opaque, int ret,
                              uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    BlockDriverState *bs = acb->common.bs;

    /* Adjust offset into cluster */
    offset += qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_read_data(s, acb, ret, offset, len);

    if (ret < 0) {
        goto err;
    }

    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Handle zero cluster and backing file reads */
    if (ret == QED_CLUSTER_ZERO) {
        qemu_iovec_memset(&acb->cur_qiov, 0, 0, acb->cur_qiov.size);
        qed_aio_next_io(acb, 0);
        return;
    } else if (ret != QED_CLUSTER_FOUND) {
        qed_read_backing_file(s, acb->cur_pos, &acb->cur_qiov,
                              qed_aio_next_io, acb);
        return;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
    bdrv_aio_readv(bs->file, offset / BDRV_SECTOR_SIZE,
                   &acb->cur_qiov, acb->cur_qiov.size / BDRV_SECTOR_SIZE,
                   qed_aio_next_io, acb);
    return;

err:
    qed_aio_complete(acb, ret);
}

/**
 * Begin next I/O or complete the request
 */
static void qed_aio_next_io(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    QEDFindClusterFunc *io_fn = (acb->flags & QED_AIOCB_WRITE) ?
                                qed_aio_write_data : qed_aio_read_data;

    trace_qed_aio_next_io(s, acb, ret, acb->cur_pos + acb->cur_qiov.size);

    /* Handle I/O error */
    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    acb->qiov_offset += acb->cur_qiov.size;
    acb->cur_pos += acb->cur_qiov.size;
    qemu_iovec_reset(&acb->cur_qiov);

    /* Complete request */
    if (acb->cur_pos >= acb->end_pos) {
        qed_aio_complete(acb, 0);
        return;
    }

    /* Find next cluster and start I/O */
    qed_find_cluster(s, &acb->request,
                     acb->cur_pos, acb->end_pos - acb->cur_pos,
                     io_fn, acb);
}

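/**
 * Set up and start a read or write request
 *
 * @flags:      0 for a read, or QED_AIOCB_WRITE optionally combined with
 *              QED_AIOCB_ZERO for writes
 */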
static BlockDriverAIOCB *qed_aio_setup(BlockDriverState *bs,
                                       int64_t sector_num,
                                       QEMUIOVector *qiov, int nb_sectors,
                                       BlockDriverCompletionFunc *cb,
                                       void *opaque, int flags)
{
    QEDAIOCB *acb = qemu_aio_get(&qed_aiocb_info, bs, cb, opaque);

    trace_qed_aio_setup(bs->opaque, acb, sector_num, nb_sectors,
                        opaque, flags);

    acb->flags = flags;
    acb->finished = NULL;
    acb->qiov = qiov;
    acb->qiov_offset = 0;
    acb->cur_pos = (uint64_t)sector_num * BDRV_SECTOR_SIZE;
    acb->end_pos = acb->cur_pos + nb_sectors * BDRV_SECTOR_SIZE;
    acb->request.l2_table = NULL;
    qemu_iovec_init(&acb->cur_qiov, qiov->niov);

    /* Start request */
    qed_aio_next_io(acb, 0);
    return &acb->common;
}

static BlockDriverAIOCB *bdrv_qed_aio_readv(BlockDriverState *bs,
                                            int64_t sector_num,
                                            QEMUIOVector *qiov, int nb_sectors,
                                            BlockDriverCompletionFunc *cb,
                                            void *opaque)
{
    return qed_aio_setup(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
}

static BlockDriverAIOCB *bdrv_qed_aio_writev(BlockDriverState *bs,
                                             int64_t sector_num,
                                             QEMUIOVector *qiov, int nb_sectors,
                                             BlockDriverCompletionFunc *cb,
                                             void *opaque)
{
    return qed_aio_setup(bs, sector_num, qiov, nb_sectors, cb,
                         opaque, QED_AIOCB_WRITE);
}

typedef struct {
    Coroutine *co;
    int ret;
    bool done;
} QEDWriteZeroesCB;

static void coroutine_fn qed_co_write_zeroes_cb(void *opaque, int ret)
{
    QEDWriteZeroesCB *cb = opaque;

    cb->done = true;
    cb->ret = ret;
    if (cb->co) {
        qemu_coroutine_enter(cb->co, NULL);
    }
}

static int coroutine_fn bdrv_qed_co_write_zeroes(BlockDriverState *bs,
                                                 int64_t sector_num,
                                                 int nb_sectors)
{
    BlockDriverAIOCB *blockacb;
    BDRVQEDState *s = bs->opaque;
    QEDWriteZeroesCB cb = { .done = false };
    QEMUIOVector qiov;
    struct iovec iov;

    /* Refuse if there are untouched backing file sectors */
    if (bs->backing_hd) {
        if (qed_offset_into_cluster(s, sector_num * BDRV_SECTOR_SIZE) != 0) {
            return -ENOTSUP;
        }
        if (qed_offset_into_cluster(s, nb_sectors * BDRV_SECTOR_SIZE) != 0) {
            return -ENOTSUP;
        }
    }

    /* Zero writes start without an I/O buffer.  If a buffer becomes necessary
     * then it will be allocated during request processing.
     */
    iov.iov_base = NULL;
    iov.iov_len  = nb_sectors * BDRV_SECTOR_SIZE;

    qemu_iovec_init_external(&qiov, &iov, 1);
    blockacb = qed_aio_setup(bs, sector_num, &qiov, nb_sectors,
                             qed_co_write_zeroes_cb, &cb,
                             QED_AIOCB_WRITE | QED_AIOCB_ZERO);
    if (!blockacb) {
        return -EIO;
    }
    if (!cb.done) {
        cb.co = qemu_coroutine_self();
        qemu_coroutine_yield();
    }
    assert(cb.done);
    return cb.ret;
}

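/**
 * Grow the image by updating image_size in the header
 *
 * Only growing is supported; no clusters are allocated here because QED
 * allocates them on demand at write time.
 */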
static int bdrv_qed_truncate(BlockDriverState *bs, int64_t offset)
{
    BDRVQEDState *s = bs->opaque;
    uint64_t old_image_size;
    int ret;

    if (!qed_is_image_size_valid(offset, s->header.cluster_size,
                                 s->header.table_size)) {
        return -EINVAL;
    }

    /* Shrinking is currently not supported */
    if ((uint64_t)offset < s->header.image_size) {
        return -ENOTSUP;
    }

    old_image_size = s->header.image_size;
    s->header.image_size = offset;
    ret = qed_write_header_sync(s);
    if (ret < 0) {
        s->header.image_size = old_image_size;
    }
    return ret;
}

static int64_t bdrv_qed_getlength(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;
    return s->header.image_size;
}

static int bdrv_qed_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
{
    BDRVQEDState *s = bs->opaque;

    memset(bdi, 0, sizeof(*bdi));
    bdi->cluster_size = s->header.cluster_size;
    bdi->is_dirty = s->header.features & QED_F_NEED_CHECK;
    return 0;
}

static int bdrv_qed_change_backing_file(BlockDriverState *bs,
                                        const char *backing_file,
                                        const char *backing_fmt)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader new_header, le_header;
    void *buffer;
    size_t buffer_len, backing_file_len;
    int ret;

    /* Refuse to set backing filename if unknown compat feature bits are
     * active.  If the image uses an unknown compat feature then we may not
     * know the layout of data following the header structure and cannot safely
     * add a new string.
     */
    if (backing_file && (s->header.compat_features &
                         ~QED_COMPAT_FEATURE_MASK)) {
        return -ENOTSUP;
    }

    memcpy(&new_header, &s->header, sizeof(new_header));

    new_header.features &= ~(QED_F_BACKING_FILE |
                             QED_F_BACKING_FORMAT_NO_PROBE);

    /* Adjust feature flags */
    if (backing_file) {
        new_header.features |= QED_F_BACKING_FILE;

        if (qed_fmt_is_raw(backing_fmt)) {
            new_header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
        }
    }

    /* Calculate new header size */
    backing_file_len = 0;

    if (backing_file) {
        backing_file_len = strlen(backing_file);
    }

    buffer_len = sizeof(new_header);
    new_header.backing_filename_offset = buffer_len;
    new_header.backing_filename_size = backing_file_len;
    buffer_len += backing_file_len;

    /* Make sure we can rewrite header without failing */
    if (buffer_len > new_header.header_size * new_header.cluster_size) {
        return -ENOSPC;
    }

    /* Prepare new header */
    buffer = g_malloc(buffer_len);

    qed_header_cpu_to_le(&new_header, &le_header);
    memcpy(buffer, &le_header, sizeof(le_header));
    buffer_len = sizeof(le_header);

    if (backing_file) {
        memcpy(buffer + buffer_len, backing_file, backing_file_len);
        buffer_len += backing_file_len;
    }

    /* Write new header */
    ret = bdrv_pwrite_sync(bs->file, 0, buffer, buffer_len);
    g_free(buffer);
    if (ret == 0) {
        memcpy(&s->header, &new_header, sizeof(new_header));
    }
    return ret;
}

static void bdrv_qed_invalidate_cache(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    bdrv_qed_close(bs);
    memset(s, 0, sizeof(BDRVQEDState));
    bdrv_qed_open(bs, NULL, bs->open_flags, NULL);
}

static int bdrv_qed_check(BlockDriverState *bs, BdrvCheckResult *result,
                          BdrvCheckMode fix)
{
    BDRVQEDState *s = bs->opaque;

    return qed_check(s, result, !!fix);
}

static QEMUOptionParameter qed_create_options[] = {
    {
        .name = BLOCK_OPT_SIZE,
        .type = OPT_SIZE,
        .help = "Virtual disk size (in bytes)"
    }, {
        .name = BLOCK_OPT_BACKING_FILE,
        .type = OPT_STRING,
        .help = "File name of a base image"
    }, {
        .name = BLOCK_OPT_BACKING_FMT,
        .type = OPT_STRING,
        .help = "Image format of the base image"
    }, {
        .name = BLOCK_OPT_CLUSTER_SIZE,
        .type = OPT_SIZE,
        .help = "Cluster size (in bytes)",
        .value = { .n = QED_DEFAULT_CLUSTER_SIZE },
    }, {
        .name = BLOCK_OPT_TABLE_SIZE,
        .type = OPT_SIZE,
        .help = "L1/L2 table size (in clusters)"
    },
    { /* end of list */ }
};

static BlockDriver bdrv_qed = {
    .format_name              = "qed",
    .instance_size            = sizeof(BDRVQEDState),
    .create_options           = qed_create_options,

    .bdrv_probe               = bdrv_qed_probe,
    .bdrv_rebind              = bdrv_qed_rebind,
    .bdrv_open                = bdrv_qed_open,
    .bdrv_close               = bdrv_qed_close,
    .bdrv_reopen_prepare      = bdrv_qed_reopen_prepare,
    .bdrv_create              = bdrv_qed_create,
    .bdrv_has_zero_init       = bdrv_has_zero_init_1,
    .bdrv_co_get_block_status = bdrv_qed_co_get_block_status,
    .bdrv_make_empty          = bdrv_qed_make_empty,
    .bdrv_aio_readv           = bdrv_qed_aio_readv,
    .bdrv_aio_writev          = bdrv_qed_aio_writev,
    .bdrv_co_write_zeroes     = bdrv_qed_co_write_zeroes,
    .bdrv_truncate            = bdrv_qed_truncate,
    .bdrv_getlength           = bdrv_qed_getlength,
    .bdrv_get_info            = bdrv_qed_get_info,
    .bdrv_change_backing_file = bdrv_qed_change_backing_file,
    .bdrv_invalidate_cache    = bdrv_qed_invalidate_cache,
    .bdrv_check               = bdrv_qed_check,
};

static void bdrv_qed_init(void)
{
    bdrv_register(&bdrv_qed);
}

block_init(bdrv_qed_init);