/*
 * QEMU Enhanced Disk Format
 *
 * Copyright IBM, Corp. 2010
 *
 * Authors:
 *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu-timer.h"
#include "trace.h"
#include "qed.h"
#include "qerror.h"
#include "migration.h"

static void qed_aio_cancel(BlockDriverAIOCB *blockacb)
{
    QEDAIOCB *acb = (QEDAIOCB *)blockacb;
    bool finished = false;

    /* Wait for the request to finish */
    acb->finished = &finished;
    while (!finished) {
        qemu_aio_wait();
    }
}

static AIOPool qed_aio_pool = {
    .aiocb_size         = sizeof(QEDAIOCB),
    .cancel             = qed_aio_cancel,
};

static int bdrv_qed_probe(const uint8_t *buf, int buf_size,
                          const char *filename)
{
    const QEDHeader *header = (const QEDHeader *)buf;

    if (buf_size < sizeof(*header)) {
        return 0;
    }
    if (le32_to_cpu(header->magic) != QED_MAGIC) {
        return 0;
    }
    return 100;
}

/**
 * Check whether an image format is raw
 *
 * @fmt:    Backing file format, may be NULL
 */
static bool qed_fmt_is_raw(const char *fmt)
{
    return fmt && strcmp(fmt, "raw") == 0;
}

static void qed_header_le_to_cpu(const QEDHeader *le, QEDHeader *cpu)
{
    cpu->magic = le32_to_cpu(le->magic);
    cpu->cluster_size = le32_to_cpu(le->cluster_size);
    cpu->table_size = le32_to_cpu(le->table_size);
    cpu->header_size = le32_to_cpu(le->header_size);
    cpu->features = le64_to_cpu(le->features);
    cpu->compat_features = le64_to_cpu(le->compat_features);
    cpu->autoclear_features = le64_to_cpu(le->autoclear_features);
    cpu->l1_table_offset = le64_to_cpu(le->l1_table_offset);
    cpu->image_size = le64_to_cpu(le->image_size);
    cpu->backing_filename_offset = le32_to_cpu(le->backing_filename_offset);
    cpu->backing_filename_size = le32_to_cpu(le->backing_filename_size);
}

static void qed_header_cpu_to_le(const QEDHeader *cpu, QEDHeader *le)
{
    le->magic = cpu_to_le32(cpu->magic);
    le->cluster_size = cpu_to_le32(cpu->cluster_size);
    le->table_size = cpu_to_le32(cpu->table_size);
    le->header_size = cpu_to_le32(cpu->header_size);
    le->features = cpu_to_le64(cpu->features);
    le->compat_features = cpu_to_le64(cpu->compat_features);
    le->autoclear_features = cpu_to_le64(cpu->autoclear_features);
    le->l1_table_offset = cpu_to_le64(cpu->l1_table_offset);
    le->image_size = cpu_to_le64(cpu->image_size);
    le->backing_filename_offset = cpu_to_le32(cpu->backing_filename_offset);
    le->backing_filename_size = cpu_to_le32(cpu->backing_filename_size);
}

static int qed_write_header_sync(BDRVQEDState *s)
{
    QEDHeader le;
    int ret;

    qed_header_cpu_to_le(&s->header, &le);
    ret = bdrv_pwrite(s->bs->file, 0, &le, sizeof(le));
    if (ret != sizeof(le)) {
        return ret;
    }
    return 0;
}

typedef struct {
    GenericCB gencb;
    BDRVQEDState *s;
    struct iovec iov;
    QEMUIOVector qiov;
    int nsectors;
    uint8_t *buf;
} QEDWriteHeaderCB;

static void qed_write_header_cb(void *opaque, int ret)
{
    QEDWriteHeaderCB *write_header_cb = opaque;

    qemu_vfree(write_header_cb->buf);
    gencb_complete(write_header_cb, ret);
}

static void qed_write_header_read_cb(void *opaque, int ret)
{
    QEDWriteHeaderCB *write_header_cb = opaque;
    BDRVQEDState *s = write_header_cb->s;
    BlockDriverAIOCB *acb;

    if (ret) {
        qed_write_header_cb(write_header_cb, ret);
        return;
    }

    /* Update header */
    qed_header_cpu_to_le(&s->header, (QEDHeader *)write_header_cb->buf);

    acb = bdrv_aio_writev(s->bs->file, 0, &write_header_cb->qiov,
                          write_header_cb->nsectors, qed_write_header_cb,
                          write_header_cb);
    if (!acb) {
        qed_write_header_cb(write_header_cb, -EIO);
    }
}

/**
 * Update header in-place (does not rewrite backing filename or other strings)
 *
 * This function only updates known header fields in-place and does not affect
 * extra data after the QED header.
 */
static void qed_write_header(BDRVQEDState *s, BlockDriverCompletionFunc cb,
                             void *opaque)
{
    /* We must write full sectors for O_DIRECT but cannot necessarily generate
     * the data following the header if an unrecognized compat feature is
     * active.  Therefore, first read the sectors containing the header, update
     * them, and write back.
     */

    BlockDriverAIOCB *acb;
    int nsectors = (sizeof(QEDHeader) + BDRV_SECTOR_SIZE - 1) /
                   BDRV_SECTOR_SIZE;
    size_t len = nsectors * BDRV_SECTOR_SIZE;
    QEDWriteHeaderCB *write_header_cb = gencb_alloc(sizeof(*write_header_cb),
                                                    cb, opaque);

    write_header_cb->s = s;
    write_header_cb->nsectors = nsectors;
    write_header_cb->buf = qemu_blockalign(s->bs, len);
    write_header_cb->iov.iov_base = write_header_cb->buf;
    write_header_cb->iov.iov_len = len;
    qemu_iovec_init_external(&write_header_cb->qiov, &write_header_cb->iov, 1);

    acb = bdrv_aio_readv(s->bs->file, 0, &write_header_cb->qiov, nsectors,
                         qed_write_header_read_cb, write_header_cb);
    if (!acb) {
        qed_write_header_cb(write_header_cb, -EIO);
    }
}

static uint64_t qed_max_image_size(uint32_t cluster_size, uint32_t table_size)
{
    uint64_t table_entries;
    uint64_t l2_size;

    table_entries = (table_size * cluster_size) / sizeof(uint64_t);
    l2_size = table_entries * cluster_size;

    return l2_size * table_entries;
}
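
/* Worked example of the size math above, assuming the default geometry from
 * qed.h (taken here to be 64 KiB clusters and table_size = 4):
 *
 *   table_entries = (4 * 65536) / 8  = 32768 entries per table
 *   l2_size       = 32768 * 65536    = 2 GiB covered per L2 table
 *   max image     = 2 GiB * 32768    = 64 TiB
 */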

static bool qed_is_cluster_size_valid(uint32_t cluster_size)
{
    if (cluster_size < QED_MIN_CLUSTER_SIZE ||
        cluster_size > QED_MAX_CLUSTER_SIZE) {
        return false;
    }
    if (cluster_size & (cluster_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}

static bool qed_is_table_size_valid(uint32_t table_size)
{
    if (table_size < QED_MIN_TABLE_SIZE ||
        table_size > QED_MAX_TABLE_SIZE) {
        return false;
    }
    if (table_size & (table_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}

static bool qed_is_image_size_valid(uint64_t image_size, uint32_t cluster_size,
                                    uint32_t table_size)
{
    if (image_size % BDRV_SECTOR_SIZE != 0) {
        return false; /* not multiple of sector size */
    }
    if (image_size > qed_max_image_size(cluster_size, table_size)) {
        return false; /* image is too large */
    }
    return true;
}

/**
 * Read a string of known length from the image file
 *
 * @file:       Image file
 * @offset:     File offset to start of string, in bytes
 * @n:          String length in bytes
 * @buf:        Destination buffer
 * @buflen:     Destination buffer length in bytes
 * @ret:        0 on success, -errno on failure
 *
 * The string is NUL-terminated.
 */
static int qed_read_string(BlockDriverState *file, uint64_t offset, size_t n,
                           char *buf, size_t buflen)
{
    int ret;
    if (n >= buflen) {
        return -EINVAL;
    }
    ret = bdrv_pread(file, offset, buf, n);
    if (ret < 0) {
        return ret;
    }
    buf[n] = '\0';
    return 0;
}

/**
 * Allocate new clusters
 *
 * @s:          QED state
 * @n:          Number of contiguous clusters to allocate
 * @ret:        Offset of first allocated cluster
 *
 * This function only produces the offset where the new clusters should be
 * written.  It updates BDRVQEDState but does not make any changes to the image
 * file.
 */
static uint64_t qed_alloc_clusters(BDRVQEDState *s, unsigned int n)
{
    uint64_t offset = s->file_size;
    s->file_size += n * s->header.cluster_size;
    return offset;
}

QEDTable *qed_alloc_table(BDRVQEDState *s)
{
    /* Honor O_DIRECT memory alignment requirements */
    return qemu_blockalign(s->bs,
                           s->header.cluster_size * s->header.table_size);
}
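
/* Each L1/L2 table occupies table_size contiguous clusters.  With the default
 * geometry assumed in the example above (64 KiB clusters, table_size = 4)
 * that is a 256 KiB allocation holding 32768 little-endian uint64_t offsets.
 */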

/**
 * Allocate a new zeroed L2 table
 */
static CachedL2Table *qed_new_l2_table(BDRVQEDState *s)
{
    CachedL2Table *l2_table = qed_alloc_l2_cache_entry(&s->l2_cache);

    l2_table->table = qed_alloc_table(s);
    l2_table->offset = qed_alloc_clusters(s, s->header.table_size);

    memset(l2_table->table->offsets, 0,
           s->header.cluster_size * s->header.table_size);
    return l2_table;
}

static void qed_aio_next_io(void *opaque, int ret);

static void qed_plug_allocating_write_reqs(BDRVQEDState *s)
{
    assert(!s->allocating_write_reqs_plugged);

    s->allocating_write_reqs_plugged = true;
}

static void qed_unplug_allocating_write_reqs(BDRVQEDState *s)
{
    QEDAIOCB *acb;

    assert(s->allocating_write_reqs_plugged);

    s->allocating_write_reqs_plugged = false;

    acb = QSIMPLEQ_FIRST(&s->allocating_write_reqs);
    if (acb) {
        qed_aio_next_io(acb, 0);
    }
}

static void qed_finish_clear_need_check(void *opaque, int ret)
{
    /* Do nothing */
}

static void qed_flush_after_clear_need_check(void *opaque, int ret)
{
    BDRVQEDState *s = opaque;

    bdrv_aio_flush(s->bs, qed_finish_clear_need_check, s);

    /* No need to wait until flush completes */
    qed_unplug_allocating_write_reqs(s);
}

static void qed_clear_need_check(void *opaque, int ret)
{
    BDRVQEDState *s = opaque;

    if (ret) {
        qed_unplug_allocating_write_reqs(s);
        return;
    }

    s->header.features &= ~QED_F_NEED_CHECK;
    qed_write_header(s, qed_flush_after_clear_need_check, s);
}

static void qed_need_check_timer_cb(void *opaque)
{
    BDRVQEDState *s = opaque;

    /* The timer should only fire when allocating writes have drained */
    assert(!QSIMPLEQ_FIRST(&s->allocating_write_reqs));

    trace_qed_need_check_timer_cb(s);

    qed_plug_allocating_write_reqs(s);

    /* Ensure writes are on disk before clearing flag */
    bdrv_aio_flush(s->bs, qed_clear_need_check, s);
}

static void qed_start_need_check_timer(BDRVQEDState *s)
{
    trace_qed_start_need_check_timer(s);

    /* Use vm_clock so we don't alter the image file while suspended for
     * migration.
     */
    qemu_mod_timer(s->need_check_timer, qemu_get_clock_ns(vm_clock) +
                   get_ticks_per_sec() * QED_NEED_CHECK_TIMEOUT);
}

/* It's okay to call this multiple times or when no timer is started */
static void qed_cancel_need_check_timer(BDRVQEDState *s)
{
    trace_qed_cancel_need_check_timer(s);
    qemu_del_timer(s->need_check_timer);
}
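
/* Summary of the dirty-bit protocol implemented by the callbacks above (a
 * reading of this file, not an authoritative spec): once allocating writes
 * have drained and QED_NEED_CHECK_TIMEOUT elapses, the timer fires, new
 * allocating writes are plugged, in-flight data is flushed, QED_F_NEED_CHECK
 * is cleared in the header, a second flush makes the header update durable,
 * and plugged writes are released.  A later allocating write sets the bit
 * again before touching metadata.
 */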

static int bdrv_qed_open(BlockDriverState *bs, int flags)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader le_header;
    int64_t file_size;
    int ret;

    s->bs = bs;
    QSIMPLEQ_INIT(&s->allocating_write_reqs);

    ret = bdrv_pread(bs->file, 0, &le_header, sizeof(le_header));
    if (ret < 0) {
        return ret;
    }
    qed_header_le_to_cpu(&le_header, &s->header);

    if (s->header.magic != QED_MAGIC) {
        return -EINVAL;
    }
    if (s->header.features & ~QED_FEATURE_MASK) {
        /* image uses unsupported feature bits */
        char buf[64];
        snprintf(buf, sizeof(buf), "%" PRIx64,
            s->header.features & ~QED_FEATURE_MASK);
        qerror_report(QERR_UNKNOWN_BLOCK_FORMAT_FEATURE,
            bs->device_name, "QED", buf);
        return -ENOTSUP;
    }
    if (!qed_is_cluster_size_valid(s->header.cluster_size)) {
        return -EINVAL;
    }

    /* Round down file size to the last cluster */
    file_size = bdrv_getlength(bs->file);
    if (file_size < 0) {
        return file_size;
    }
    s->file_size = qed_start_of_cluster(s, file_size);

    if (!qed_is_table_size_valid(s->header.table_size)) {
        return -EINVAL;
    }
    if (!qed_is_image_size_valid(s->header.image_size,
                                 s->header.cluster_size,
                                 s->header.table_size)) {
        return -EINVAL;
    }
    if (!qed_check_table_offset(s, s->header.l1_table_offset)) {
        return -EINVAL;
    }

    s->table_nelems = (s->header.cluster_size * s->header.table_size) /
                      sizeof(uint64_t);
    s->l2_shift = ffs(s->header.cluster_size) - 1;
    s->l2_mask = s->table_nelems - 1;
    s->l1_shift = s->l2_shift + ffs(s->table_nelems) - 1;

    if ((s->header.features & QED_F_BACKING_FILE)) {
        if ((uint64_t)s->header.backing_filename_offset +
            s->header.backing_filename_size >
            s->header.cluster_size * s->header.header_size) {
            return -EINVAL;
        }

        ret = qed_read_string(bs->file, s->header.backing_filename_offset,
                              s->header.backing_filename_size, bs->backing_file,
                              sizeof(bs->backing_file));
        if (ret < 0) {
            return ret;
        }

        if (s->header.features & QED_F_BACKING_FORMAT_NO_PROBE) {
            pstrcpy(bs->backing_format, sizeof(bs->backing_format), "raw");
        }
    }

    /* Reset unknown autoclear feature bits.  This is a backwards
     * compatibility mechanism that allows images to be opened by older
     * programs, which "knock out" unknown feature bits.  When an image is
     * opened by a newer program again it can detect that the autoclear
     * feature is no longer valid.
     */
    if ((s->header.autoclear_features & ~QED_AUTOCLEAR_FEATURE_MASK) != 0 &&
        !bdrv_is_read_only(bs->file)) {
        s->header.autoclear_features &= QED_AUTOCLEAR_FEATURE_MASK;

        ret = qed_write_header_sync(s);
        if (ret) {
            return ret;
        }

        /* From here on only known autoclear feature bits are valid */
        bdrv_flush(bs->file);
    }

    s->l1_table = qed_alloc_table(s);
    qed_init_l2_cache(&s->l2_cache);

    ret = qed_read_l1_table_sync(s);
    if (ret) {
        goto out;
    }

    /* If image was not closed cleanly, check consistency */
    if (s->header.features & QED_F_NEED_CHECK) {
        /* Read-only images cannot be fixed.  There is no risk of corruption
         * since write operations are not possible.  Therefore, allow
         * potentially inconsistent images to be opened read-only.  This can
         * aid data recovery from an otherwise inconsistent image.
         */
        if (!bdrv_is_read_only(bs->file)) {
            BdrvCheckResult result = {0};

            ret = qed_check(s, &result, true);
            if (ret) {
                goto out;
            }
            if (!result.corruptions && !result.check_errors) {
                /* Ensure fixes reach storage before clearing check bit */
                bdrv_flush(s->bs);

                s->header.features &= ~QED_F_NEED_CHECK;
                qed_write_header_sync(s);
            }
        }
    }

    s->need_check_timer = qemu_new_timer_ns(vm_clock,
                                            qed_need_check_timer_cb, s);

    error_set(&s->migration_blocker,
              QERR_BLOCK_FORMAT_FEATURE_NOT_SUPPORTED,
              "qed", bs->device_name, "live migration");
    migrate_add_blocker(s->migration_blocker);

out:
    if (ret) {
        qed_free_l2_cache(&s->l2_cache);
        qemu_vfree(s->l1_table);
    }
    return ret;
}
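
/* Sketch of how the shift/mask fields computed above decompose a virtual
 * offset, assuming the default geometry used in the earlier examples
 * (64 KiB clusters, table_size = 4, so table_nelems = 32768):
 *
 *   l2_shift = 16    -> bits [0, 15]  are the offset within a cluster
 *   l2_mask  = 32767 -> bits [16, 30] index into an L2 table (2 GiB span)
 *   l1_shift = 31    -> bits [31, ..] index into the L1 table
 */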

static void bdrv_qed_close(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    migrate_del_blocker(s->migration_blocker);
    error_free(s->migration_blocker);

    qed_cancel_need_check_timer(s);
    qemu_free_timer(s->need_check_timer);

    /* Ensure writes reach stable storage */
    bdrv_flush(bs->file);

    /* Clean shutdown, no check required on next open */
    if (s->header.features & QED_F_NEED_CHECK) {
        s->header.features &= ~QED_F_NEED_CHECK;
        qed_write_header_sync(s);
    }

    qed_free_l2_cache(&s->l2_cache);
    qemu_vfree(s->l1_table);
}

static int qed_create(const char *filename, uint32_t cluster_size,
                      uint64_t image_size, uint32_t table_size,
                      const char *backing_file, const char *backing_fmt)
{
    QEDHeader header = {
        .magic = QED_MAGIC,
        .cluster_size = cluster_size,
        .table_size = table_size,
        .header_size = 1,
        .features = 0,
        .compat_features = 0,
        .l1_table_offset = cluster_size,
        .image_size = image_size,
    };
    QEDHeader le_header;
    uint8_t *l1_table = NULL;
    size_t l1_size = header.cluster_size * header.table_size;
    int ret = 0;
    BlockDriverState *bs = NULL;

    ret = bdrv_create_file(filename, NULL);
    if (ret < 0) {
        return ret;
    }

    ret = bdrv_file_open(&bs, filename, BDRV_O_RDWR | BDRV_O_CACHE_WB);
    if (ret < 0) {
        return ret;
    }

    /* File must start empty and grow, check truncate is supported */
    ret = bdrv_truncate(bs, 0);
    if (ret < 0) {
        goto out;
    }

    if (backing_file) {
        header.features |= QED_F_BACKING_FILE;
        header.backing_filename_offset = sizeof(le_header);
        header.backing_filename_size = strlen(backing_file);

        if (qed_fmt_is_raw(backing_fmt)) {
            header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
        }
    }

    qed_header_cpu_to_le(&header, &le_header);
    ret = bdrv_pwrite(bs, 0, &le_header, sizeof(le_header));
    if (ret < 0) {
        goto out;
    }
    ret = bdrv_pwrite(bs, sizeof(le_header), backing_file,
                      header.backing_filename_size);
    if (ret < 0) {
        goto out;
    }

    l1_table = g_malloc0(l1_size);
    ret = bdrv_pwrite(bs, header.l1_table_offset, l1_table, l1_size);
    if (ret < 0) {
        goto out;
    }

    ret = 0; /* success */
out:
    g_free(l1_table);
    bdrv_delete(bs);
    return ret;
}
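
/* On-disk layout produced by qed_create(), derived from the code above rather
 * than from a separate spec:
 *
 *   byte 0                  QEDHeader, little-endian
 *   byte sizeof(QEDHeader)  backing filename, if any (header_size = 1
 *                           cluster, so both must fit in the first cluster)
 *   byte cluster_size       L1 table, table_size clusters of zeroes
 *
 * Data clusters and L2 tables are allocated past the L1 table on demand.
 */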

static int bdrv_qed_create(const char *filename, QEMUOptionParameter *options)
{
    uint64_t image_size = 0;
    uint32_t cluster_size = QED_DEFAULT_CLUSTER_SIZE;
    uint32_t table_size = QED_DEFAULT_TABLE_SIZE;
    const char *backing_file = NULL;
    const char *backing_fmt = NULL;

    while (options && options->name) {
        if (!strcmp(options->name, BLOCK_OPT_SIZE)) {
            image_size = options->value.n;
        } else if (!strcmp(options->name, BLOCK_OPT_BACKING_FILE)) {
            backing_file = options->value.s;
        } else if (!strcmp(options->name, BLOCK_OPT_BACKING_FMT)) {
            backing_fmt = options->value.s;
        } else if (!strcmp(options->name, BLOCK_OPT_CLUSTER_SIZE)) {
            if (options->value.n) {
                cluster_size = options->value.n;
            }
        } else if (!strcmp(options->name, BLOCK_OPT_TABLE_SIZE)) {
            if (options->value.n) {
                table_size = options->value.n;
            }
        }
        options++;
    }

    if (!qed_is_cluster_size_valid(cluster_size)) {
        fprintf(stderr, "QED cluster size must be within range [%u, %u] and a power of 2\n",
                QED_MIN_CLUSTER_SIZE, QED_MAX_CLUSTER_SIZE);
        return -EINVAL;
    }
    if (!qed_is_table_size_valid(table_size)) {
        fprintf(stderr, "QED table size must be within range [%u, %u] and a power of 2\n",
                QED_MIN_TABLE_SIZE, QED_MAX_TABLE_SIZE);
        return -EINVAL;
    }
    if (!qed_is_image_size_valid(image_size, cluster_size, table_size)) {
        fprintf(stderr, "QED image size must be a non-zero multiple of "
                        "cluster size and less than %" PRIu64 " bytes\n",
                qed_max_image_size(cluster_size, table_size));
        return -EINVAL;
    }

    return qed_create(filename, cluster_size, image_size, table_size,
                      backing_file, backing_fmt);
}

typedef struct {
    Coroutine *co;
    int is_allocated;
    int *pnum;
} QEDIsAllocatedCB;

static void qed_is_allocated_cb(void *opaque, int ret, uint64_t offset, size_t len)
{
    QEDIsAllocatedCB *cb = opaque;
    *cb->pnum = len / BDRV_SECTOR_SIZE;
    cb->is_allocated = (ret == QED_CLUSTER_FOUND || ret == QED_CLUSTER_ZERO);
    if (cb->co) {
        qemu_coroutine_enter(cb->co, NULL);
    }
}

static int coroutine_fn bdrv_qed_co_is_allocated(BlockDriverState *bs,
                                                 int64_t sector_num,
                                                 int nb_sectors, int *pnum)
{
    BDRVQEDState *s = bs->opaque;
    uint64_t pos = (uint64_t)sector_num * BDRV_SECTOR_SIZE;
    size_t len = (size_t)nb_sectors * BDRV_SECTOR_SIZE;
    QEDIsAllocatedCB cb = {
        .is_allocated = -1,
        .pnum = pnum,
    };
    QEDRequest request = { .l2_table = NULL };

    qed_find_cluster(s, &request, pos, len, qed_is_allocated_cb, &cb);

    /* Now sleep if the callback wasn't invoked immediately */
    while (cb.is_allocated == -1) {
        cb.co = qemu_coroutine_self();
        qemu_coroutine_yield();
    }

    qed_unref_l2_cache_entry(request.l2_table);

    return cb.is_allocated;
}

static int bdrv_qed_make_empty(BlockDriverState *bs)
{
    return -ENOTSUP;
}

static BDRVQEDState *acb_to_s(QEDAIOCB *acb)
{
    return acb->common.bs->opaque;
}

/**
 * Read from the backing file or zero-fill if no backing file
 *
 * @s:          QED state
 * @pos:        Byte position in device
 * @qiov:       Destination I/O vector
 * @cb:         Completion function
 * @opaque:     User data for completion function
 *
 * This function reads qiov->size bytes starting at pos from the backing file.
 * If there is no backing file then zeroes are read.
 */
static void qed_read_backing_file(BDRVQEDState *s, uint64_t pos,
                                  QEMUIOVector *qiov,
                                  BlockDriverCompletionFunc *cb, void *opaque)
{
    BlockDriverAIOCB *aiocb;
    uint64_t backing_length = 0;
    size_t size;

    /* If there is a backing file, get its length.  Treat the absence of a
     * backing file like a zero length backing file.
     */
    if (s->bs->backing_hd) {
        int64_t l = bdrv_getlength(s->bs->backing_hd);
        if (l < 0) {
            cb(opaque, l);
            return;
        }
        backing_length = l;
    }

    /* Zero all sectors if reading beyond the end of the backing file */
    if (pos >= backing_length ||
        pos + qiov->size > backing_length) {
        qemu_iovec_memset(qiov, 0, qiov->size);
    }

    /* Complete now if there are no backing file sectors to read */
    if (pos >= backing_length) {
        cb(opaque, 0);
        return;
    }

    /* If the read straddles the end of the backing file, shorten it */
    size = MIN((uint64_t)backing_length - pos, qiov->size);

    BLKDBG_EVENT(s->bs->file, BLKDBG_READ_BACKING);
    aiocb = bdrv_aio_readv(s->bs->backing_hd, pos / BDRV_SECTOR_SIZE,
                           qiov, size / BDRV_SECTOR_SIZE, cb, opaque);
    if (!aiocb) {
        cb(opaque, -EIO);
    }
}
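
/* Worked example of the straddle logic above (illustrative numbers only):
 * with a 1 MiB backing file, an 8 KiB read at pos = 1 MiB - 4 KiB first
 * zeroes the whole qiov, then reads only size = 4 KiB from the backing
 * file, leaving the tail zeroed.
 */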

typedef struct {
    GenericCB gencb;
    BDRVQEDState *s;
    QEMUIOVector qiov;
    struct iovec iov;
    uint64_t offset;
} CopyFromBackingFileCB;

static void qed_copy_from_backing_file_cb(void *opaque, int ret)
{
    CopyFromBackingFileCB *copy_cb = opaque;
    qemu_vfree(copy_cb->iov.iov_base);
    gencb_complete(&copy_cb->gencb, ret);
}

static void qed_copy_from_backing_file_write(void *opaque, int ret)
{
    CopyFromBackingFileCB *copy_cb = opaque;
    BDRVQEDState *s = copy_cb->s;
    BlockDriverAIOCB *aiocb;

    if (ret) {
        qed_copy_from_backing_file_cb(copy_cb, ret);
        return;
    }

    BLKDBG_EVENT(s->bs->file, BLKDBG_COW_WRITE);
    aiocb = bdrv_aio_writev(s->bs->file, copy_cb->offset / BDRV_SECTOR_SIZE,
                            &copy_cb->qiov,
                            copy_cb->qiov.size / BDRV_SECTOR_SIZE,
                            qed_copy_from_backing_file_cb, copy_cb);
    if (!aiocb) {
        qed_copy_from_backing_file_cb(copy_cb, -EIO);
    }
}

/**
 * Copy data from backing file into the image
 *
 * @s:          QED state
 * @pos:        Byte position in device
 * @len:        Number of bytes
 * @offset:     Byte offset in image file
 * @cb:         Completion function
 * @opaque:     User data for completion function
 */
static void qed_copy_from_backing_file(BDRVQEDState *s, uint64_t pos,
                                       uint64_t len, uint64_t offset,
                                       BlockDriverCompletionFunc *cb,
                                       void *opaque)
{
    CopyFromBackingFileCB *copy_cb;

    /* Skip copy entirely if there is no work to do */
    if (len == 0) {
        cb(opaque, 0);
        return;
    }

    copy_cb = gencb_alloc(sizeof(*copy_cb), cb, opaque);
    copy_cb->s = s;
    copy_cb->offset = offset;
    copy_cb->iov.iov_base = qemu_blockalign(s->bs, len);
    copy_cb->iov.iov_len = len;
    qemu_iovec_init_external(&copy_cb->qiov, &copy_cb->iov, 1);

    qed_read_backing_file(s, pos, &copy_cb->qiov,
                          qed_copy_from_backing_file_write, copy_cb);
}

/**
 * Link one or more contiguous clusters into a table
 *
 * @s:              QED state
 * @table:          L2 table
 * @index:          First cluster index
 * @n:              Number of contiguous clusters
 * @cluster:        First cluster offset
 *
 * The cluster offset may be an allocated byte offset in the image file, the
 * zero cluster marker, or the unallocated cluster marker.
 */
static void qed_update_l2_table(BDRVQEDState *s, QEDTable *table, int index,
                                unsigned int n, uint64_t cluster)
{
    int i;
    for (i = index; i < index + n; i++) {
        table->offsets[i] = cluster;
        if (!qed_offset_is_unalloc_cluster(cluster) &&
            !qed_offset_is_zero_cluster(cluster)) {
            cluster += s->header.cluster_size;
        }
    }
}
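
/* For example, linking n = 3 allocated clusters starting at byte offset X
 * stores X, X + cluster_size, and X + 2 * cluster_size into
 * offsets[index..index+2]; the zero and unallocated markers are not offsets,
 * so they are repeated verbatim instead of being incremented.
 */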

static void qed_aio_complete_bh(void *opaque)
{
    QEDAIOCB *acb = opaque;
    BlockDriverCompletionFunc *cb = acb->common.cb;
    void *user_opaque = acb->common.opaque;
    int ret = acb->bh_ret;
    bool *finished = acb->finished;

    qemu_bh_delete(acb->bh);
    qemu_aio_release(acb);

    /* Invoke callback */
    cb(user_opaque, ret);

    /* Signal cancel completion */
    if (finished) {
        *finished = true;
    }
}

static void qed_aio_complete(QEDAIOCB *acb, int ret)
{
    BDRVQEDState *s = acb_to_s(acb);

    trace_qed_aio_complete(s, acb, ret);

    /* Free resources */
    qemu_iovec_destroy(&acb->cur_qiov);
    qed_unref_l2_cache_entry(acb->request.l2_table);

    /* Arrange for a bh to invoke the completion function */
    acb->bh_ret = ret;
    acb->bh = qemu_bh_new(qed_aio_complete_bh, acb);
    qemu_bh_schedule(acb->bh);

    /* Start next allocating write request waiting behind this one.  Note that
     * requests enqueue themselves when they first hit an unallocated cluster
     * but they wait until the entire request is finished before waking up the
     * next request in the queue.  This ensures that we don't cycle through
     * requests multiple times but rather finish one at a time completely.
     */
    if (acb == QSIMPLEQ_FIRST(&s->allocating_write_reqs)) {
        QSIMPLEQ_REMOVE_HEAD(&s->allocating_write_reqs, next);
        acb = QSIMPLEQ_FIRST(&s->allocating_write_reqs);
        if (acb) {
            qed_aio_next_io(acb, 0);
        } else if (s->header.features & QED_F_NEED_CHECK) {
            qed_start_need_check_timer(s);
        }
    }
}

/**
 * Commit the current L2 table to the cache
 */
static void qed_commit_l2_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    CachedL2Table *l2_table = acb->request.l2_table;
    uint64_t l2_offset = l2_table->offset;

    qed_commit_l2_cache_entry(&s->l2_cache, l2_table);

    /* This is guaranteed to succeed because we just committed the entry to the
     * cache.
     */
    acb->request.l2_table = qed_find_l2_cache_entry(&s->l2_cache, l2_offset);
    assert(acb->request.l2_table != NULL);

    qed_aio_next_io(opaque, ret);
}

/**
 * Update L1 table with new L2 table offset and write it out
 */
static void qed_aio_write_l1_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    int index;

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    index = qed_l1_index(s, acb->cur_pos);
    s->l1_table->offsets[index] = acb->request.l2_table->offset;

    qed_write_l1_table(s, index, 1, qed_commit_l2_update, acb);
}

/**
 * Update L2 table with new cluster offsets and write them out
 */
static void qed_aio_write_l2_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    bool need_alloc = acb->find_cluster_ret == QED_CLUSTER_L1;
    int index;

    if (ret) {
        goto err;
    }

    if (need_alloc) {
        qed_unref_l2_cache_entry(acb->request.l2_table);
        acb->request.l2_table = qed_new_l2_table(s);
    }

    index = qed_l2_index(s, acb->cur_pos);
    qed_update_l2_table(s, acb->request.l2_table->table, index,
                        acb->cur_nclusters, acb->cur_cluster);

    if (need_alloc) {
        /* Write out the whole new L2 table */
        qed_write_l2_table(s, &acb->request, 0, s->table_nelems, true,
                           qed_aio_write_l1_update, acb);
    } else {
        /* Write out only the updated part of the L2 table */
        qed_write_l2_table(s, &acb->request, index, acb->cur_nclusters, false,
                           qed_aio_next_io, acb);
    }
    return;

err:
    qed_aio_complete(acb, ret);
}

/**
 * Flush new data clusters before updating the L2 table
 *
 * This flush is necessary when a backing file is in use.  A crash during an
 * allocating write could result in empty clusters in the image.  If the write
 * only touched a subregion of the cluster, then backing image sectors have
 * been lost in the untouched region.  The solution is to flush after writing a
 * new data cluster and before updating the L2 table.
 */
static void qed_aio_write_flush_before_l2_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);

    if (!bdrv_aio_flush(s->bs->file, qed_aio_write_l2_update, opaque)) {
        qed_aio_complete(acb, -EIO);
    }
}

/**
 * Write data to the image file
 */
static void qed_aio_write_main(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t offset = acb->cur_cluster +
                      qed_offset_into_cluster(s, acb->cur_pos);
    BlockDriverCompletionFunc *next_fn;
    BlockDriverAIOCB *file_acb;

    trace_qed_aio_write_main(s, acb, ret, offset, acb->cur_qiov.size);

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    if (acb->find_cluster_ret == QED_CLUSTER_FOUND) {
        next_fn = qed_aio_next_io;
    } else {
        if (s->bs->backing_hd) {
            next_fn = qed_aio_write_flush_before_l2_update;
        } else {
            next_fn = qed_aio_write_l2_update;
        }
    }

    BLKDBG_EVENT(s->bs->file, BLKDBG_WRITE_AIO);
    file_acb = bdrv_aio_writev(s->bs->file, offset / BDRV_SECTOR_SIZE,
                               &acb->cur_qiov,
                               acb->cur_qiov.size / BDRV_SECTOR_SIZE,
                               next_fn, acb);
    if (!file_acb) {
        qed_aio_complete(acb, -EIO);
    }
}

/**
 * Populate the untouched region at the back of a new data cluster
 */
static void qed_aio_write_postfill(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t start = acb->cur_pos + acb->cur_qiov.size;
    uint64_t len =
        qed_start_of_cluster(s, start + s->header.cluster_size - 1) - start;
    uint64_t offset = acb->cur_cluster +
                      qed_offset_into_cluster(s, acb->cur_pos) +
                      acb->cur_qiov.size;

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    trace_qed_aio_write_postfill(s, acb, start, len, offset);
    qed_copy_from_backing_file(s, start, len, offset,
                               qed_aio_write_main, acb);
}

/**
 * Populate the untouched region at the front of a new data cluster
 */
static void qed_aio_write_prefill(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t start = qed_start_of_cluster(s, acb->cur_pos);
    uint64_t len = qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_write_prefill(s, acb, start, len, acb->cur_cluster);
    qed_copy_from_backing_file(s, start, len, acb->cur_cluster,
                               qed_aio_write_postfill, acb);
}

/**
 * Check if the QED_F_NEED_CHECK bit should be set during allocating write
 */
static bool qed_should_set_need_check(BDRVQEDState *s)
{
    /* The flush before L2 update path ensures consistency */
    if (s->bs->backing_hd) {
        return false;
    }

    return !(s->header.features & QED_F_NEED_CHECK);
}

/**
 * Write new data cluster
 *
 * @acb:        Write request
 * @len:        Length in bytes
 *
 * This path is taken when writing to previously unallocated clusters.
 */
static void qed_aio_write_alloc(QEDAIOCB *acb, size_t len)
{
    BDRVQEDState *s = acb_to_s(acb);

    /* Cancel timer when the first allocating request comes in */
    if (QSIMPLEQ_EMPTY(&s->allocating_write_reqs)) {
        qed_cancel_need_check_timer(s);
    }

    /* Freeze this request if another allocating write is in progress */
    if (acb != QSIMPLEQ_FIRST(&s->allocating_write_reqs)) {
        QSIMPLEQ_INSERT_TAIL(&s->allocating_write_reqs, acb, next);
    }
    if (acb != QSIMPLEQ_FIRST(&s->allocating_write_reqs) ||
        s->allocating_write_reqs_plugged) {
        return; /* wait for existing request to finish */
    }

    acb->cur_nclusters = qed_bytes_to_clusters(s,
            qed_offset_into_cluster(s, acb->cur_pos) + len);
    acb->cur_cluster = qed_alloc_clusters(s, acb->cur_nclusters);
    qemu_iovec_copy(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    if (qed_should_set_need_check(s)) {
        s->header.features |= QED_F_NEED_CHECK;
        qed_write_header(s, qed_aio_write_prefill, acb);
    } else {
        qed_aio_write_prefill(acb, 0);
    }
}
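
/* Cluster-count example for the allocation above (illustrative numbers,
 * assuming 64 KiB clusters): a write of len = 128 KiB starting 4 KiB into a
 * cluster spans 4 KiB + 128 KiB = 132 KiB of cluster space, so
 * qed_bytes_to_clusters() yields cur_nclusters = 3.
 */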

/**
 * Write data cluster in place
 *
 * @acb:        Write request
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * This path is taken when writing to already allocated clusters.
 */
static void qed_aio_write_inplace(QEDAIOCB *acb, uint64_t offset, size_t len)
{
    /* Calculate the I/O vector */
    acb->cur_cluster = offset;
    qemu_iovec_copy(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Do the actual write */
    qed_aio_write_main(acb, 0);
}

/**
 * Write data cluster
 *
 * @opaque:     Write request
 * @ret:        QED_CLUSTER_FOUND, QED_CLUSTER_L2, QED_CLUSTER_L1,
 *              QED_CLUSTER_ZERO, or -errno
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * Callback from qed_find_cluster().
 */
static void qed_aio_write_data(void *opaque, int ret,
                               uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;

    trace_qed_aio_write_data(acb_to_s(acb), acb, ret, offset, len);

    acb->find_cluster_ret = ret;

    switch (ret) {
    case QED_CLUSTER_FOUND:
        qed_aio_write_inplace(acb, offset, len);
        break;

    case QED_CLUSTER_L2:
    case QED_CLUSTER_L1:
    case QED_CLUSTER_ZERO:
        qed_aio_write_alloc(acb, len);
        break;

    default:
        qed_aio_complete(acb, ret);
        break;
    }
}

/**
 * Read data cluster
 *
 * @opaque:     Read request
 * @ret:        QED_CLUSTER_FOUND, QED_CLUSTER_L2, QED_CLUSTER_L1,
 *              QED_CLUSTER_ZERO, or -errno
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * Callback from qed_find_cluster().
 */
static void qed_aio_read_data(void *opaque, int ret,
                              uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    BlockDriverState *bs = acb->common.bs;
    BlockDriverAIOCB *file_acb;

    /* Adjust offset into cluster */
    offset += qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_read_data(s, acb, ret, offset, len);

    if (ret < 0) {
        goto err;
    }

    qemu_iovec_copy(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Handle zero cluster and backing file reads */
    if (ret == QED_CLUSTER_ZERO) {
        qemu_iovec_memset(&acb->cur_qiov, 0, acb->cur_qiov.size);
        qed_aio_next_io(acb, 0);
        return;
    } else if (ret != QED_CLUSTER_FOUND) {
        qed_read_backing_file(s, acb->cur_pos, &acb->cur_qiov,
                              qed_aio_next_io, acb);
        return;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
    file_acb = bdrv_aio_readv(bs->file, offset / BDRV_SECTOR_SIZE,
                              &acb->cur_qiov,
                              acb->cur_qiov.size / BDRV_SECTOR_SIZE,
                              qed_aio_next_io, acb);
    if (!file_acb) {
        ret = -EIO;
        goto err;
    }
    return;

err:
    qed_aio_complete(acb, ret);
}

/**
 * Begin next I/O or complete the request
 */
static void qed_aio_next_io(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    QEDFindClusterFunc *io_fn =
        acb->is_write ? qed_aio_write_data : qed_aio_read_data;

    trace_qed_aio_next_io(s, acb, ret, acb->cur_pos + acb->cur_qiov.size);

    /* Handle I/O error */
    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    acb->qiov_offset += acb->cur_qiov.size;
    acb->cur_pos += acb->cur_qiov.size;
    qemu_iovec_reset(&acb->cur_qiov);

    /* Complete request */
    if (acb->cur_pos >= acb->end_pos) {
        qed_aio_complete(acb, 0);
        return;
    }

    /* Find next cluster and start I/O */
    qed_find_cluster(s, &acb->request,
                     acb->cur_pos, acb->end_pos - acb->cur_pos,
                     io_fn, acb);
}
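
/* Request state machine, as read from the callbacks above: qed_aio_setup()
 * kicks off qed_aio_next_io(), which looks up one run of contiguous clusters
 * via qed_find_cluster() and dispatches to qed_aio_read_data() or
 * qed_aio_write_data().  Each of those re-enters qed_aio_next_io() when its
 * piece finishes, until cur_pos reaches end_pos and qed_aio_complete() fires
 * the caller's callback from a bottom half.
 */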

static BlockDriverAIOCB *qed_aio_setup(BlockDriverState *bs,
                                       int64_t sector_num,
                                       QEMUIOVector *qiov, int nb_sectors,
                                       BlockDriverCompletionFunc *cb,
                                       void *opaque, bool is_write)
{
    QEDAIOCB *acb = qemu_aio_get(&qed_aio_pool, bs, cb, opaque);

    trace_qed_aio_setup(bs->opaque, acb, sector_num, nb_sectors,
                        opaque, is_write);

    acb->is_write = is_write;
    acb->finished = NULL;
    acb->qiov = qiov;
    acb->qiov_offset = 0;
    acb->cur_pos = (uint64_t)sector_num * BDRV_SECTOR_SIZE;
    acb->end_pos = acb->cur_pos + nb_sectors * BDRV_SECTOR_SIZE;
    acb->request.l2_table = NULL;
    qemu_iovec_init(&acb->cur_qiov, qiov->niov);

    /* Start request */
    qed_aio_next_io(acb, 0);
    return &acb->common;
}

static BlockDriverAIOCB *bdrv_qed_aio_readv(BlockDriverState *bs,
                                            int64_t sector_num,
                                            QEMUIOVector *qiov, int nb_sectors,
                                            BlockDriverCompletionFunc *cb,
                                            void *opaque)
{
    return qed_aio_setup(bs, sector_num, qiov, nb_sectors, cb, opaque, false);
}

static BlockDriverAIOCB *bdrv_qed_aio_writev(BlockDriverState *bs,
                                             int64_t sector_num,
                                             QEMUIOVector *qiov, int nb_sectors,
                                             BlockDriverCompletionFunc *cb,
                                             void *opaque)
{
    return qed_aio_setup(bs, sector_num, qiov, nb_sectors, cb, opaque, true);
}

static BlockDriverAIOCB *bdrv_qed_aio_flush(BlockDriverState *bs,
                                            BlockDriverCompletionFunc *cb,
                                            void *opaque)
{
    return bdrv_aio_flush(bs->file, cb, opaque);
}

static int bdrv_qed_truncate(BlockDriverState *bs, int64_t offset)
{
    BDRVQEDState *s = bs->opaque;
    uint64_t old_image_size;
    int ret;

    if (!qed_is_image_size_valid(offset, s->header.cluster_size,
                                 s->header.table_size)) {
        return -EINVAL;
    }

    /* Shrinking is currently not supported */
    if ((uint64_t)offset < s->header.image_size) {
        return -ENOTSUP;
    }

    old_image_size = s->header.image_size;
    s->header.image_size = offset;
    ret = qed_write_header_sync(s);
    if (ret < 0) {
        s->header.image_size = old_image_size;
    }
    return ret;
}

static int64_t bdrv_qed_getlength(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;
    return s->header.image_size;
}

static int bdrv_qed_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
{
    BDRVQEDState *s = bs->opaque;

    memset(bdi, 0, sizeof(*bdi));
    bdi->cluster_size = s->header.cluster_size;
    return 0;
}

static int bdrv_qed_change_backing_file(BlockDriverState *bs,
                                        const char *backing_file,
                                        const char *backing_fmt)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader new_header, le_header;
    void *buffer;
    size_t buffer_len, backing_file_len;
    int ret;

    /* Refuse to set backing filename if unknown compat feature bits are
     * active.  If the image uses an unknown compat feature then we may not
     * know the layout of data following the header structure and cannot safely
     * add a new string.
     */
    if (backing_file && (s->header.compat_features &
                         ~QED_COMPAT_FEATURE_MASK)) {
        return -ENOTSUP;
    }

    memcpy(&new_header, &s->header, sizeof(new_header));

    new_header.features &= ~(QED_F_BACKING_FILE |
                             QED_F_BACKING_FORMAT_NO_PROBE);

    /* Adjust feature flags */
    if (backing_file) {
        new_header.features |= QED_F_BACKING_FILE;

        if (qed_fmt_is_raw(backing_fmt)) {
            new_header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
        }
    }

    /* Calculate new header size */
    backing_file_len = 0;

    if (backing_file) {
        backing_file_len = strlen(backing_file);
    }

    buffer_len = sizeof(new_header);
    new_header.backing_filename_offset = buffer_len;
    new_header.backing_filename_size = backing_file_len;
    buffer_len += backing_file_len;

    /* Make sure we can rewrite header without failing */
    if (buffer_len > new_header.header_size * new_header.cluster_size) {
        return -ENOSPC;
    }

    /* Prepare new header */
    buffer = g_malloc(buffer_len);

    qed_header_cpu_to_le(&new_header, &le_header);
    memcpy(buffer, &le_header, sizeof(le_header));
    buffer_len = sizeof(le_header);

    if (backing_file) {
        memcpy(buffer + buffer_len, backing_file, backing_file_len);
        buffer_len += backing_file_len;
    }

    /* Write new header */
    ret = bdrv_pwrite_sync(bs->file, 0, buffer, buffer_len);
    g_free(buffer);
    if (ret == 0) {
        memcpy(&s->header, &new_header, sizeof(new_header));
    }
    return ret;
}

static int bdrv_qed_check(BlockDriverState *bs, BdrvCheckResult *result)
{
    BDRVQEDState *s = bs->opaque;

    return qed_check(s, result, false);
}

static QEMUOptionParameter qed_create_options[] = {
    {
        .name = BLOCK_OPT_SIZE,
        .type = OPT_SIZE,
        .help = "Virtual disk size (in bytes)"
    }, {
        .name = BLOCK_OPT_BACKING_FILE,
        .type = OPT_STRING,
        .help = "File name of a base image"
    }, {
        .name = BLOCK_OPT_BACKING_FMT,
        .type = OPT_STRING,
        .help = "Image format of the base image"
    }, {
        .name = BLOCK_OPT_CLUSTER_SIZE,
        .type = OPT_SIZE,
        .help = "Cluster size (in bytes)",
        .value = { .n = QED_DEFAULT_CLUSTER_SIZE },
    }, {
        .name = BLOCK_OPT_TABLE_SIZE,
        .type = OPT_SIZE,
        .help = "L1/L2 table size (in clusters)"
    },
    { /* end of list */ }
};

static BlockDriver bdrv_qed = {
    .format_name              = "qed",
    .instance_size            = sizeof(BDRVQEDState),
    .create_options           = qed_create_options,

    .bdrv_probe               = bdrv_qed_probe,
    .bdrv_open                = bdrv_qed_open,
    .bdrv_close               = bdrv_qed_close,
    .bdrv_create              = bdrv_qed_create,
    .bdrv_co_is_allocated     = bdrv_qed_co_is_allocated,
    .bdrv_make_empty          = bdrv_qed_make_empty,
    .bdrv_aio_readv           = bdrv_qed_aio_readv,
    .bdrv_aio_writev          = bdrv_qed_aio_writev,
    .bdrv_aio_flush           = bdrv_qed_aio_flush,
    .bdrv_truncate            = bdrv_qed_truncate,
    .bdrv_getlength           = bdrv_qed_getlength,
    .bdrv_get_info            = bdrv_qed_get_info,
    .bdrv_change_backing_file = bdrv_qed_change_backing_file,
    .bdrv_check               = bdrv_qed_check,
};

static void bdrv_qed_init(void)
{
    bdrv_register(&bdrv_qed);
}

block_init(bdrv_qed_init);