block/mirror.c @ 5bc361b8

/*
 * Image mirroring
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Paolo Bonzini  <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "trace.h"
#include "block/blockjob.h"
#include "block/block_int.h"
#include "qemu/ratelimit.h"
#include "qemu/bitmap.h"
#define SLICE_TIME    100000000ULL /* ns */
#define MAX_IN_FLIGHT 16

/* The mirroring buffer is a list of granularity-sized chunks.
 * Free chunks are organized in a list.
 */
typedef struct MirrorBuffer {
    QSIMPLEQ_ENTRY(MirrorBuffer) next;
} MirrorBuffer;
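
/* State of the mirror job.  cow_bitmap marks chunks that have already been
 * copied to the target at least once (allocated only when the job must do
 * copy-on-write itself); in_flight_bitmap marks the granularity-sized
 * chunks with a copy operation currently pending.
 */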
typedef struct MirrorBlockJob {
    BlockJob common;
    RateLimit limit;
    BlockDriverState *target;
    BlockDriverState *base;
    MirrorSyncMode mode;
    BlockdevOnError on_source_error, on_target_error;
    bool synced;
    bool should_complete;
    int64_t sector_num;
    int64_t granularity;
    size_t buf_size;
    unsigned long *cow_bitmap;
    BdrvDirtyBitmap *dirty_bitmap;
    HBitmapIter hbi;
    uint8_t *buf;
    QSIMPLEQ_HEAD(, MirrorBuffer) buf_free;
    int buf_free_count;

    unsigned long *in_flight_bitmap;
    int in_flight;
    int ret;
} MirrorBlockJob;

typedef struct MirrorOp {
    MirrorBlockJob *s;
    QEMUIOVector qiov;
    int64_t sector_num;
    int nb_sectors;
} MirrorOp;
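
/* Map a failed read or write to the action selected by the corresponding
 * on_source_error/on_target_error policy.  Any error takes the job out of
 * the "synced" state.
 */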
static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read,
                                            int error)
{
    s->synced = false;
    if (read) {
        return block_job_error_action(&s->common, s->common.bs,
                                      s->on_source_error, true, error);
    } else {
        return block_job_error_action(&s->common, s->target,
                                      s->on_target_error, false, error);
    }
}
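
/* Completion bookkeeping shared by reads and writes: return the buffers of
 * op->qiov to the free list, clear the in-flight bits and, on success, mark
 * the chunks as copied in cow_bitmap (when in use).  Finally re-enter the
 * job coroutine, which may be waiting for free buffers or for in-flight
 * I/O to drain.
 */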
static void mirror_iteration_done(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;
    struct iovec *iov;
    int64_t chunk_num;
    int i, nb_chunks, sectors_per_chunk;

    trace_mirror_iteration_done(s, op->sector_num, op->nb_sectors, ret);

    s->in_flight--;
    iov = op->qiov.iov;
    for (i = 0; i < op->qiov.niov; i++) {
        MirrorBuffer *buf = (MirrorBuffer *) iov[i].iov_base;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, buf, next);
        s->buf_free_count++;
    }

    sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
    chunk_num = op->sector_num / sectors_per_chunk;
    nb_chunks = op->nb_sectors / sectors_per_chunk;
    bitmap_clear(s->in_flight_bitmap, chunk_num, nb_chunks);
    if (s->cow_bitmap && ret >= 0) {
        bitmap_set(s->cow_bitmap, chunk_num, nb_chunks);
    }

    qemu_iovec_destroy(&op->qiov); /* frees the iovec array; the chunks
                                    * themselves were returned to buf_free
                                    * above */
    g_slice_free(MirrorOp, op);
    qemu_coroutine_enter(s->common.co, NULL);
}
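
/* On a failed write, re-dirty the sectors so that they are retried on a
 * later iteration; only a BDRV_ACTION_REPORT makes the error fatal for the
 * whole job.
 */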
static void mirror_write_complete(void *opaque, int ret)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;
    if (ret < 0) {
        BlockDriverState *source = s->common.bs;
        BlockErrorAction action;

        bdrv_set_dirty(source, op->sector_num, op->nb_sectors);
        action = mirror_error_action(s, false, -ret);
        if (action == BDRV_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }
    }
    mirror_iteration_done(op, ret);
}
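
/* A failed read is handled like a failed write; a successful one is
 * chained directly into the corresponding write to the target.
 */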
static void mirror_read_complete(void *opaque, int ret)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;
    if (ret < 0) {
        BlockDriverState *source = s->common.bs;
        BlockErrorAction action;

        bdrv_set_dirty(source, op->sector_num, op->nb_sectors);
        action = mirror_error_action(s, true, -ret);
        if (action == BDRV_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }

        mirror_iteration_done(op, ret);
        return;
    }
    bdrv_aio_writev(s->target, op->sector_num, &op->qiov, op->nb_sectors,
                    mirror_write_complete, op);
}
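
/* Submit one asynchronous copy: take the next dirty sector from the dirty
 * bitmap iterator, extend the operation over adjacent dirty chunks (and
 * over whole clusters when the job does copy-on-write for the target),
 * then read the data from the source.  The read completion chains into
 * the target write.
 */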
static void coroutine_fn mirror_iteration(MirrorBlockJob *s)
{
    BlockDriverState *source = s->common.bs;
    int nb_sectors, sectors_per_chunk, nb_chunks;
    int64_t end, sector_num, next_chunk, next_sector, hbitmap_next_sector;
    MirrorOp *op;

    s->sector_num = hbitmap_iter_next(&s->hbi);
    if (s->sector_num < 0) {
        bdrv_dirty_iter_init(source, s->dirty_bitmap, &s->hbi);
        s->sector_num = hbitmap_iter_next(&s->hbi);
        trace_mirror_restart_iter(s,
                                  bdrv_get_dirty_count(source, s->dirty_bitmap));
        assert(s->sector_num >= 0);
    }

    hbitmap_next_sector = s->sector_num;
    sector_num = s->sector_num;
    sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
    end = s->common.len >> BDRV_SECTOR_BITS;

    /* Extend the QEMUIOVector to include all adjacent blocks that will
     * be copied in this operation.
     *
     * We have to do this if we have no backing file yet in the destination,
     * and the cluster size is very large.  Then we need to do COW ourselves.
     * The first time a cluster is copied, copy it entirely.  Note that,
     * because both the granularity and the cluster size are powers of two,
     * the number of sectors to copy cannot exceed one cluster.
     *
     * We also want to extend the QEMUIOVector to include more adjacent
     * dirty blocks if possible, to limit the number of I/O operations and
     * run efficiently even with a small granularity.
     */
    nb_chunks = 0;
    nb_sectors = 0;
    next_sector = sector_num;
    next_chunk = sector_num / sectors_per_chunk;

    /* Wait for I/O to this cluster (from a previous iteration) to be done.  */
    while (test_bit(next_chunk, s->in_flight_bitmap)) {
        trace_mirror_yield_in_flight(s, sector_num, s->in_flight);
        qemu_coroutine_yield();
    }

    do {
        int added_sectors, added_chunks;

        if (!bdrv_get_dirty(source, s->dirty_bitmap, next_sector) ||
            test_bit(next_chunk, s->in_flight_bitmap)) {
            assert(nb_sectors > 0);
            break;
        }

        added_sectors = sectors_per_chunk;
        if (s->cow_bitmap && !test_bit(next_chunk, s->cow_bitmap)) {
            bdrv_round_to_clusters(s->target,
                                   next_sector, added_sectors,
                                   &next_sector, &added_sectors);

            /* On the first iteration, the rounding may make us copy
             * sectors before the first dirty one.
             */
            if (next_sector < sector_num) {
                assert(nb_sectors == 0);
                sector_num = next_sector;
                next_chunk = next_sector / sectors_per_chunk;
            }
        }

        added_sectors = MIN(added_sectors, end - (sector_num + nb_sectors));
        added_chunks = (added_sectors + sectors_per_chunk - 1) / sectors_per_chunk;

        /* When doing COW, it may happen that there is not enough space for
         * a full cluster.  Wait if that is the case.
         */
        while (nb_chunks == 0 && s->buf_free_count < added_chunks) {
            trace_mirror_yield_buf_busy(s, nb_chunks, s->in_flight);
            qemu_coroutine_yield();
        }
        if (s->buf_free_count < nb_chunks + added_chunks) {
            trace_mirror_break_buf_busy(s, nb_chunks, s->in_flight);
            break;
        }

        /* We have enough free space to copy these sectors.  */
        bitmap_set(s->in_flight_bitmap, next_chunk, added_chunks);

        nb_sectors += added_sectors;
        nb_chunks += added_chunks;
        next_sector += added_sectors;
        next_chunk += added_chunks;
    } while (next_sector < end);

    /* Allocate a MirrorOp that is used as an AIO callback.  */
    op = g_slice_new(MirrorOp);
    op->s = s;
    op->sector_num = sector_num;
    op->nb_sectors = nb_sectors;

    /* Now make a QEMUIOVector taking enough granularity-sized chunks
     * from s->buf_free.
     */
    qemu_iovec_init(&op->qiov, nb_chunks);
    next_sector = sector_num;
    while (nb_chunks-- > 0) {
        MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free);
        QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next);
        s->buf_free_count--;
        qemu_iovec_add(&op->qiov, buf, s->granularity);

        /* Advance the HBitmapIter in parallel, so that we do not examine
         * the same sector twice.
         */
        if (next_sector > hbitmap_next_sector
            && bdrv_get_dirty(source, s->dirty_bitmap, next_sector)) {
            hbitmap_next_sector = hbitmap_iter_next(&s->hbi);
        }

        next_sector += sectors_per_chunk;
    }

    bdrv_reset_dirty(source, sector_num, nb_sectors);

    /* Copy the dirty cluster.  */
    s->in_flight++;
    trace_mirror_one_iteration(s, sector_num, nb_sectors);
    bdrv_aio_readv(source, sector_num, &op->qiov, nb_sectors,
                   mirror_read_complete, op);
}
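
/* Carve s->buf into granularity-sized chunks and put them all on the free
 * list; the MirrorBuffer link lives in the chunk itself.
 */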
static void mirror_free_init(MirrorBlockJob *s)
{
    int granularity = s->granularity;
    size_t buf_size = s->buf_size;
    uint8_t *buf = s->buf;

    assert(s->buf_free_count == 0);
    QSIMPLEQ_INIT(&s->buf_free);
    while (buf_size != 0) {
        MirrorBuffer *cur = (MirrorBuffer *)buf;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, cur, next);
        s->buf_free_count++;
        buf_size -= granularity;
        buf += granularity;
    }
}
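
/* Yield until every in-flight copy operation has completed.  */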
static void mirror_drain(MirrorBlockJob *s)
{
    while (s->in_flight > 0) {
        qemu_coroutine_yield();
    }
}
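
/* The job coroutine: populate the dirty bitmap according to the sync mode,
 * then keep copying dirty chunks until the source is clean and the job is
 * completed or cancelled.
 */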
static void coroutine_fn mirror_run(void *opaque)
{
    MirrorBlockJob *s = opaque;
    BlockDriverState *bs = s->common.bs;
    int64_t sector_num, end, sectors_per_chunk, length;
    uint64_t last_pause_ns;
    BlockDriverInfo bdi;
    char backing_filename[1024];
    int ret = 0;
    int n;

    if (block_job_is_cancelled(&s->common)) {
        goto immediate_exit;
    }

    s->common.len = bdrv_getlength(bs);
    if (s->common.len <= 0) {
        block_job_completed(&s->common, s->common.len);
        return;
    }

    length = (bdrv_getlength(bs) + s->granularity - 1) / s->granularity;
    s->in_flight_bitmap = bitmap_new(length);

    /* If we have no backing file yet in the destination, we cannot let
     * the destination do COW.  Instead, we copy sectors around the
     * dirty data if needed.  We need a bitmap to do that.
     */
    bdrv_get_backing_filename(s->target, backing_filename,
                              sizeof(backing_filename));
    if (backing_filename[0] && !s->target->backing_hd) {
        bdrv_get_info(s->target, &bdi);
        if (s->granularity < bdi.cluster_size) {
            s->buf_size = MAX(s->buf_size, bdi.cluster_size);
            s->cow_bitmap = bitmap_new(length);
        }
    }

    end = s->common.len >> BDRV_SECTOR_BITS;
    s->buf = qemu_blockalign(bs, s->buf_size);
    sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
    mirror_free_init(s);

    if (s->mode != MIRROR_SYNC_MODE_NONE) {
        /* First part, loop on the sectors and initialize the dirty bitmap.  */
        BlockDriverState *base = s->base;
        for (sector_num = 0; sector_num < end; ) {
            int64_t next = (sector_num | (sectors_per_chunk - 1)) + 1;
            ret = bdrv_is_allocated_above(bs, base,
                                          sector_num, next - sector_num, &n);

            if (ret < 0) {
                goto immediate_exit;
            }

            assert(n > 0);
            if (ret == 1) {
                bdrv_set_dirty(bs, sector_num, n);
                sector_num = next;
            } else {
                sector_num += n;
            }
        }
    }

    bdrv_dirty_iter_init(bs, s->dirty_bitmap, &s->hbi);
    last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    for (;;) {
        uint64_t delay_ns;
        int64_t cnt;
        bool should_complete;

        if (s->ret < 0) {
            ret = s->ret;
            goto immediate_exit;
        }

        cnt = bdrv_get_dirty_count(bs, s->dirty_bitmap);

        /* Note that even when no rate limit is applied we need to yield
         * periodically with no pending I/O so that qemu_aio_flush() returns.
         * We do so every SLICE_TIME nanoseconds, or when there is an error,
         * or when the source is clean, whichever comes first.
         */
        if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - last_pause_ns < SLICE_TIME &&
            s->common.iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
            if (s->in_flight == MAX_IN_FLIGHT || s->buf_free_count == 0 ||
                (cnt == 0 && s->in_flight > 0)) {
                trace_mirror_yield(s, s->in_flight, s->buf_free_count, cnt);
                qemu_coroutine_yield();
                continue;
            } else if (cnt != 0) {
                mirror_iteration(s);
                continue;
            }
        }

        should_complete = false;
        if (s->in_flight == 0 && cnt == 0) {
            trace_mirror_before_flush(s);
            ret = bdrv_flush(s->target);
            if (ret < 0) {
                if (mirror_error_action(s, false, -ret) == BDRV_ACTION_REPORT) {
                    goto immediate_exit;
                }
            } else {
                /* We're out of the streaming phase.  From now on, if the job
                 * is cancelled we will actually complete all pending I/O and
                 * report completion.  This way, block-job-cancel will leave
                 * the target in a consistent state.
                 */
                s->common.offset = end * BDRV_SECTOR_SIZE;
                if (!s->synced) {
                    block_job_ready(&s->common);
                    s->synced = true;
                }

                should_complete = s->should_complete ||
                    block_job_is_cancelled(&s->common);
                cnt = bdrv_get_dirty_count(bs, s->dirty_bitmap);
            }
        }

        if (cnt == 0 && should_complete) {
            /* The dirty bitmap is not updated while operations are pending.
             * If we're about to exit, wait for pending operations before
             * calling bdrv_get_dirty_count(bs), or we may exit while the
             * source has dirty data to copy!
             *
             * Note that I/O can be submitted by the guest while
             * mirror_iteration runs.
             */
            trace_mirror_before_drain(s, cnt);
            bdrv_drain_all();
            cnt = bdrv_get_dirty_count(bs, s->dirty_bitmap);
        }

        ret = 0;
        trace_mirror_before_sleep(s, cnt, s->synced);
        if (!s->synced) {
            /* Publish progress */
            s->common.offset = (end - cnt) * BDRV_SECTOR_SIZE;

            if (s->common.speed) {
                delay_ns = ratelimit_calculate_delay(&s->limit, sectors_per_chunk);
            } else {
                delay_ns = 0;
            }

            block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
            if (block_job_is_cancelled(&s->common)) {
                break;
            }
        } else if (!should_complete) {
            delay_ns = (s->in_flight == 0 && cnt == 0 ? SLICE_TIME : 0);
            block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
        } else if (cnt == 0) {
            /* The two disks are in sync.  Exit and report successful
             * completion.
             */
            assert(QLIST_EMPTY(&bs->tracked_requests));
            s->common.cancelled = false;
            break;
        }
        last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    }

immediate_exit:
    if (s->in_flight > 0) {
        /* We get here only if something went wrong.  Either the job failed,
         * or it was cancelled prematurely so that we do not guarantee that
         * the target is a copy of the source.
         */
        assert(ret < 0 || (!s->synced && block_job_is_cancelled(&s->common)));
        mirror_drain(s);
    }

    assert(s->in_flight == 0);
    qemu_vfree(s->buf);
    g_free(s->cow_bitmap);
    g_free(s->in_flight_bitmap);
    bdrv_release_dirty_bitmap(bs, s->dirty_bitmap);
    bdrv_iostatus_disable(s->target);
    if (s->should_complete && ret == 0) {
        if (bdrv_get_flags(s->target) != bdrv_get_flags(s->common.bs)) {
            bdrv_reopen(s->target, bdrv_get_flags(s->common.bs), NULL);
        }
        bdrv_swap(s->target, s->common.bs);
    }
    bdrv_unref(s->target);
    block_job_completed(&s->common, ret);
}
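
/* speed is in bytes per second; the rate limiter counts sectors, so
 * convert before programming it.
 */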
static void mirror_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    if (speed < 0) {
        error_set(errp, QERR_INVALID_PARAMETER, "speed");
        return;
    }
    ratelimit_set_speed(&s->limit, speed / BDRV_SECTOR_SIZE, SLICE_TIME);
}

static void mirror_iostatus_reset(BlockJob *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    bdrv_iostatus_reset(s->target);
}
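
/* block-job-complete: make sure the target has a backing file to fall back
 * to, then tell mirror_run to finish.  Only allowed once the job has
 * reported "ready" (s->synced).
 */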
static void mirror_complete(BlockJob *job, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
    Error *local_err = NULL;
    int ret;

    ret = bdrv_open_backing_file(s->target, NULL, &local_err);
    if (ret < 0) {
        char backing_filename[PATH_MAX];
        bdrv_get_full_backing_filename(s->target, backing_filename,
                                       sizeof(backing_filename));
        error_propagate(errp, local_err);
        return;
    }
    if (!s->synced) {
        error_set(errp, QERR_BLOCK_JOB_NOT_READY, job->bs->device_name);
        return;
    }

    s->should_complete = true;
    block_job_resume(job);
}

static const BlockJobDriver mirror_job_driver = {
    .instance_size  = sizeof(MirrorBlockJob),
    .job_type       = BLOCK_JOB_TYPE_MIRROR,
    .set_speed      = mirror_set_speed,
    .iostatus_reset = mirror_iostatus_reset,
    .complete       = mirror_complete,
};
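
/* Create and start a mirror job on bs.  A granularity of zero selects a
 * default based on the target's cluster size, clamped to [4k, 64k].
 */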
void mirror_start(BlockDriverState *bs, BlockDriverState *target,
                  int64_t speed, int64_t granularity, int64_t buf_size,
                  MirrorSyncMode mode, BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  BlockDriverCompletionFunc *cb,
                  void *opaque, Error **errp)
{
    MirrorBlockJob *s;
    BlockDriverState *base = NULL;

    if (granularity == 0) {
        /* Choose the default granularity based on the target file's cluster
         * size, clamped between 4k and 64k.  */
        BlockDriverInfo bdi;
        if (bdrv_get_info(target, &bdi) >= 0 && bdi.cluster_size != 0) {
            granularity = MAX(4096, bdi.cluster_size);
            granularity = MIN(65536, granularity);
        } else {
            granularity = 65536;
        }
    }

    assert((granularity & (granularity - 1)) == 0);

    if ((on_source_error == BLOCKDEV_ON_ERROR_STOP ||
         on_source_error == BLOCKDEV_ON_ERROR_ENOSPC) &&
        !bdrv_iostatus_is_enabled(bs)) {
        error_set(errp, QERR_INVALID_PARAMETER, "on-source-error");
        return;
    }

    if (mode == MIRROR_SYNC_MODE_TOP) {
        base = bs->backing_hd;
    } else {
        base = NULL;
    }

    s = block_job_create(&mirror_job_driver, bs, speed, cb, opaque, errp);
    if (!s) {
        return;
    }

    s->on_source_error = on_source_error;
    s->on_target_error = on_target_error;
    s->target = target;
    s->mode = mode;
    s->base = base;
    s->granularity = granularity;
    s->buf_size = MAX(buf_size, granularity);

    s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity);
    bdrv_set_enable_write_cache(s->target, true);
    bdrv_set_on_error(s->target, on_target_error, on_target_error);
    bdrv_iostatus_enable(s->target);
    s->common.co = qemu_coroutine_create(mirror_run);
    trace_mirror_start(bs, s, s->common.co, opaque);
    qemu_coroutine_enter(s->common.co, s);
}