block-migration.c @ 60e1b2a6

/*
 * QEMU live block migration
 *
 * Copyright IBM, Corp. 2009
 *
 * Authors:
 *  Liran Schour   <lirans@il.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu-common.h"
#include "block_int.h"
#include "hw/hw.h"
#include "qemu-queue.h"
#include "qemu-timer.h"
#include "monitor.h"
#include "block-migration.h"
#include "migration.h"
#include "blockdev.h"
#include <assert.h>

#define BLOCK_SIZE (BDRV_SECTORS_PER_DIRTY_CHUNK << BDRV_SECTOR_BITS)

#define BLK_MIG_FLAG_DEVICE_BLOCK       0x01
#define BLK_MIG_FLAG_EOS                0x02
#define BLK_MIG_FLAG_PROGRESS           0x04

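/*
 * Stream format (see blk_send() and block_load()): every element begins
 * with a be64 whose low bits hold one of the flags above and whose upper
 * bits hold a byte offset (sector << BDRV_SECTOR_BITS) or, for
 * BLK_MIG_FLAG_PROGRESS, a percentage.  A DEVICE_BLOCK element is
 * followed by a one-byte device name length, the device name, and
 * BLOCK_SIZE bytes of payload; EOS terminates the section.
 */
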
#define MAX_IS_ALLOCATED_SEARCH 65536

//#define DEBUG_BLK_MIGRATION

#ifdef DEBUG_BLK_MIGRATION
#define DPRINTF(fmt, ...) \
    do { printf("blk_migration: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

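/* Per-device migration state: cur_sector is the bulk-copy cursor,
 * cur_dirty the dirty-pass cursor, and aio_bitmap marks chunks whose
 * asynchronous reads are still in flight. */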
typedef struct BlkMigDevState {
    BlockDriverState *bs;
    int bulk_completed;
    int shared_base;
    int64_t cur_sector;
    int64_t cur_dirty;
    int64_t completed_sectors;
    int64_t total_sectors;
    int64_t dirty;
    QSIMPLEQ_ENTRY(BlkMigDevState) entry;
    unsigned long *aio_bitmap;
} BlkMigDevState;

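/* One in-flight read of a single chunk; completed blocks sit on
 * block_mig_state.blk_list until flush_blks() sends them. */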
typedef struct BlkMigBlock {
    uint8_t *buf;
    BlkMigDevState *bmds;
    int64_t sector;
    int nr_sectors;
    struct iovec iov;
    QEMUIOVector qiov;
    BlockDriverAIOCB *aiocb;
    int ret;
    QSIMPLEQ_ENTRY(BlkMigBlock) entry;
} BlkMigBlock;

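/* Global migration state: submitted/read_done/transferred count chunks
 * as they move from AIO read to the completed queue to the wire;
 * reads and total_time feed the bandwidth estimate. */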
typedef struct BlkMigState {
    int blk_enable;
    int shared_base;
    QSIMPLEQ_HEAD(bmds_list, BlkMigDevState) bmds_list;
    QSIMPLEQ_HEAD(blk_list, BlkMigBlock) blk_list;
    int submitted;
    int read_done;
    int transferred;
    int64_t total_sector_sum;
    int prev_progress;
    int bulk_completed;
    long double total_time;
    long double prev_time_offset;
    int reads;
} BlkMigState;

static BlkMigState block_mig_state;

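/* Emit one chunk: the sector/flags header word, the device name, then
 * the full BLOCK_SIZE payload. */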
static void blk_send(QEMUFile *f, BlkMigBlock * blk)
{
    int len;

    /* sector number and flags */
    qemu_put_be64(f, (blk->sector << BDRV_SECTOR_BITS)
                     | BLK_MIG_FLAG_DEVICE_BLOCK);

    /* device name */
    len = strlen(blk->bmds->bs->device_name);
    qemu_put_byte(f, len);
    qemu_put_buffer(f, (uint8_t *)blk->bmds->bs->device_name, len);

    qemu_put_buffer(f, blk->buf, BLOCK_SIZE);
}

int blk_mig_active(void)
{
    return !QSIMPLEQ_EMPTY(&block_mig_state.bmds_list);
}

uint64_t blk_mig_bytes_transferred(void)
{
    BlkMigDevState *bmds;
    uint64_t sum = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        sum += bmds->completed_sectors;
    }
    return sum << BDRV_SECTOR_BITS;
}

uint64_t blk_mig_bytes_remaining(void)
{
    return blk_mig_bytes_total() - blk_mig_bytes_transferred();
}

uint64_t blk_mig_bytes_total(void)
{
    BlkMigDevState *bmds;
    uint64_t sum = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        sum += bmds->total_sectors;
    }
    return sum << BDRV_SECTOR_BITS;
}

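/* Average read throughput in bytes per nanosecond (rt_clock ticks in
 * ns): reads and total_time are accounted in blk_mig_read_cb(), and
 * total_time only accumulates while at least one read is outstanding. */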
static inline long double compute_read_bwidth(void)
{
    assert(block_mig_state.total_time != 0);
    return (block_mig_state.reads / block_mig_state.total_time) * BLOCK_SIZE;
}

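/* The aio_bitmap keeps one bit per BDRV_SECTORS_PER_DIRTY_CHUNK-sized
 * chunk; a set bit means an asynchronous read of that chunk is still
 * in flight. */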
static int bmds_aio_inflight(BlkMigDevState *bmds, int64_t sector)
{
    int64_t chunk = sector / (int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK;

    if ((sector << BDRV_SECTOR_BITS) < bdrv_getlength(bmds->bs)) {
        return !!(bmds->aio_bitmap[chunk / (sizeof(unsigned long) * 8)] &
            (1UL << (chunk % (sizeof(unsigned long) * 8))));
    } else {
        return 0;
    }
}

static void bmds_set_aio_inflight(BlkMigDevState *bmds, int64_t sector_num,
                             int nb_sectors, int set)
{
    int64_t start, end;
    unsigned long val, idx, bit;

    start = sector_num / BDRV_SECTORS_PER_DIRTY_CHUNK;
    end = (sector_num + nb_sectors - 1) / BDRV_SECTORS_PER_DIRTY_CHUNK;

    for (; start <= end; start++) {
        idx = start / (sizeof(unsigned long) * 8);
        bit = start % (sizeof(unsigned long) * 8);
        val = bmds->aio_bitmap[idx];
        if (set) {
            val |= 1UL << bit;
        } else {
            val &= ~(1UL << bit);
        }
        bmds->aio_bitmap[idx] = val;
    }
}

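/* Size the bitmap by rounding the device up to whole chunks and then
 * to whole bytes (eight chunk bits per byte). */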
static void alloc_aio_bitmap(BlkMigDevState *bmds)
{
    BlockDriverState *bs = bmds->bs;
    int64_t bitmap_size;

    bitmap_size = (bdrv_getlength(bs) >> BDRV_SECTOR_BITS) +
            BDRV_SECTORS_PER_DIRTY_CHUNK * 8 - 1;
    bitmap_size /= BDRV_SECTORS_PER_DIRTY_CHUNK * 8;

    bmds->aio_bitmap = g_malloc0(bitmap_size);
}

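/* AIO completion callback: record the result, queue the block for
 * flush_blks(), clear its in-flight bit, and update the read-time
 * accounting used by compute_read_bwidth(). */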
static void blk_mig_read_cb(void *opaque, int ret)
{
    long double curr_time = qemu_get_clock_ns(rt_clock);
    BlkMigBlock *blk = opaque;

    blk->ret = ret;

    block_mig_state.reads++;
    block_mig_state.total_time += (curr_time - block_mig_state.prev_time_offset);
    block_mig_state.prev_time_offset = curr_time;

    QSIMPLEQ_INSERT_TAIL(&block_mig_state.blk_list, blk, entry);
    bmds_set_aio_inflight(blk->bmds, blk->sector, blk->nr_sectors, 0);

    block_mig_state.submitted--;
    block_mig_state.read_done++;
    assert(block_mig_state.submitted >= 0);
}

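/* Submit an asynchronous read of the next bulk chunk of one device.
 * With a shared base image, sectors not allocated in the local file
 * are skipped.  Returns 1 when the device's bulk pass is complete. */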
static int mig_save_device_bulk(Monitor *mon, QEMUFile *f,
                                BlkMigDevState *bmds)
{
    int64_t total_sectors = bmds->total_sectors;
    int64_t cur_sector = bmds->cur_sector;
    BlockDriverState *bs = bmds->bs;
    BlkMigBlock *blk;
    int nr_sectors;

    if (bmds->shared_base) {
        while (cur_sector < total_sectors &&
               !bdrv_is_allocated(bs, cur_sector, MAX_IS_ALLOCATED_SEARCH,
                                  &nr_sectors)) {
            cur_sector += nr_sectors;
        }
    }

    if (cur_sector >= total_sectors) {
        bmds->cur_sector = bmds->completed_sectors = total_sectors;
        return 1;
    }

    bmds->completed_sectors = cur_sector;

    cur_sector &= ~((int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK - 1);

    /* we are going to transfer a full block even if it is not allocated */
    nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;

    if (total_sectors - cur_sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
        nr_sectors = total_sectors - cur_sector;
    }

    blk = g_malloc(sizeof(BlkMigBlock));
    blk->buf = g_malloc(BLOCK_SIZE);
    blk->bmds = bmds;
    blk->sector = cur_sector;
    blk->nr_sectors = nr_sectors;

    blk->iov.iov_base = blk->buf;
    blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
    qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);

    if (block_mig_state.submitted == 0) {
        block_mig_state.prev_time_offset = qemu_get_clock_ns(rt_clock);
    }

    blk->aiocb = bdrv_aio_readv(bs, cur_sector, &blk->qiov,
                                nr_sectors, blk_mig_read_cb, blk);
    block_mig_state.submitted++;

    bdrv_reset_dirty(bs, cur_sector, nr_sectors);
    bmds->cur_sector = cur_sector + nr_sectors;

    return (bmds->cur_sector >= total_sectors);
}

static void set_dirty_tracking(int enable)
{
    BlkMigDevState *bmds;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        bdrv_set_dirty_tracking(bmds->bs, enable);
    }
}

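/* bdrv_iterate() callback: create a BlkMigDevState for every writable
 * device, take a drive reference, and mark the device in use so it
 * cannot go away while migration runs. */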
static void init_blk_migration_it(void *opaque, BlockDriverState *bs)
{
    Monitor *mon = opaque;
    BlkMigDevState *bmds;
    int64_t sectors;

    if (!bdrv_is_read_only(bs)) {
        sectors = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;
        if (sectors <= 0) {
            return;
        }

        bmds = g_malloc0(sizeof(BlkMigDevState));
        bmds->bs = bs;
        bmds->bulk_completed = 0;
        bmds->total_sectors = sectors;
        bmds->completed_sectors = 0;
        bmds->shared_base = block_mig_state.shared_base;
        alloc_aio_bitmap(bmds);
        drive_get_ref(drive_get_by_blockdev(bs));
        bdrv_set_in_use(bs, 1);

        block_mig_state.total_sector_sum += sectors;

        if (bmds->shared_base) {
            monitor_printf(mon, "Start migration for %s with shared base "
                                "image\n",
                           bs->device_name);
        } else {
            monitor_printf(mon, "Start full migration for %s\n",
                           bs->device_name);
        }

        QSIMPLEQ_INSERT_TAIL(&block_mig_state.bmds_list, bmds, entry);
    }
}

static void init_blk_migration(Monitor *mon, QEMUFile *f)
{
    block_mig_state.submitted = 0;
    block_mig_state.read_done = 0;
    block_mig_state.transferred = 0;
    block_mig_state.total_sector_sum = 0;
    block_mig_state.prev_progress = -1;
    block_mig_state.bulk_completed = 0;
    block_mig_state.total_time = 0;
    block_mig_state.reads = 0;

    bdrv_iterate(init_blk_migration_it, mon);
}

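/* Submit one bulk chunk from the first device whose bulk pass is not
 * yet finished, and emit a PROGRESS record whenever the overall
 * percentage changes.  Returns 0 once all devices have completed the
 * bulk phase. */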
static int blk_mig_save_bulked_block(Monitor *mon, QEMUFile *f)
{
    int64_t completed_sector_sum = 0;
    BlkMigDevState *bmds;
    int progress;
    int ret = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        if (bmds->bulk_completed == 0) {
            if (mig_save_device_bulk(mon, f, bmds) == 1) {
                /* completed bulk section for this device */
                bmds->bulk_completed = 1;
            }
            completed_sector_sum += bmds->completed_sectors;
            ret = 1;
            break;
        } else {
            completed_sector_sum += bmds->completed_sectors;
        }
    }

    if (block_mig_state.total_sector_sum != 0) {
        progress = completed_sector_sum * 100 /
                   block_mig_state.total_sector_sum;
    } else {
        progress = 100;
    }
    if (progress != block_mig_state.prev_progress) {
        block_mig_state.prev_progress = progress;
        qemu_put_be64(f, (progress << BDRV_SECTOR_BITS)
                         | BLK_MIG_FLAG_PROGRESS);
        monitor_printf(mon, "Completed %d %%\r", progress);
        monitor_flush(mon);
    }

    return ret;
}

static void blk_mig_reset_dirty_cursor(void)
{
    BlkMigDevState *bmds;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        bmds->cur_dirty = 0;
    }
}

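/* Walk the dirty bitmap from cur_dirty and transfer at most one dirty
 * chunk per call, asynchronously when is_async is set (stage 2) or
 * with a blocking read (stage 3).  If the chunk still has a bulk read
 * in flight, drain all AIO first.  Returns 1 once the cursor has
 * passed the end of the device. */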
static int mig_save_device_dirty(Monitor *mon, QEMUFile *f,
                                 BlkMigDevState *bmds, int is_async)
{
    BlkMigBlock *blk;
    int64_t total_sectors = bmds->total_sectors;
    int64_t sector;
    int nr_sectors;
    int ret = -EIO;

    for (sector = bmds->cur_dirty; sector < bmds->total_sectors;) {
        if (bmds_aio_inflight(bmds, sector)) {
            bdrv_drain_all();
        }
        if (bdrv_get_dirty(bmds->bs, sector)) {

            if (total_sectors - sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
                nr_sectors = total_sectors - sector;
            } else {
                nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
            }
            blk = g_malloc(sizeof(BlkMigBlock));
            blk->buf = g_malloc(BLOCK_SIZE);
            blk->bmds = bmds;
            blk->sector = sector;
            blk->nr_sectors = nr_sectors;

            if (is_async) {
                blk->iov.iov_base = blk->buf;
                blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
                qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);

                if (block_mig_state.submitted == 0) {
                    block_mig_state.prev_time_offset = qemu_get_clock_ns(rt_clock);
                }

                blk->aiocb = bdrv_aio_readv(bmds->bs, sector, &blk->qiov,
                                            nr_sectors, blk_mig_read_cb, blk);
                block_mig_state.submitted++;
                bmds_set_aio_inflight(bmds, sector, nr_sectors, 1);
            } else {
                ret = bdrv_read(bmds->bs, sector, blk->buf, nr_sectors);
                if (ret < 0) {
                    goto error;
                }
                blk_send(f, blk);

                g_free(blk->buf);
                g_free(blk);
            }

            bdrv_reset_dirty(bmds->bs, sector, nr_sectors);
            break;
        }
        sector += BDRV_SECTORS_PER_DIRTY_CHUNK;
        bmds->cur_dirty = sector;
    }

    return (bmds->cur_dirty >= bmds->total_sectors);

error:
    monitor_printf(mon, "Error reading sector %" PRId64 "\n", sector);
    qemu_file_set_error(f, ret);
    g_free(blk->buf);
    g_free(blk);
    return 0;
}

static int blk_mig_save_dirty_block(Monitor *mon, QEMUFile *f, int is_async)
{
    BlkMigDevState *bmds;
    int ret = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        if (mig_save_device_dirty(mon, f, bmds, is_async) == 0) {
            ret = 1;
            break;
        }
    }

    return ret;
}

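/* Send completed reads from blk_list down the wire, stopping at the
 * rate limit or on the first failed read. */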
static void flush_blks(QEMUFile* f)
{
    BlkMigBlock *blk;

    DPRINTF("%s Enter submitted %d read_done %d transferred %d\n",
            __FUNCTION__, block_mig_state.submitted, block_mig_state.read_done,
            block_mig_state.transferred);

    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
        if (qemu_file_rate_limit(f)) {
            break;
        }
        if (blk->ret < 0) {
            qemu_file_set_error(f, blk->ret);
            break;
        }
        blk_send(f, blk);

        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
        g_free(blk->buf);
        g_free(blk);

        block_mig_state.read_done--;
        block_mig_state.transferred++;
        assert(block_mig_state.read_done >= 0);
    }

    DPRINTF("%s Exit submitted %d read_done %d transferred %d\n", __FUNCTION__,
            block_mig_state.submitted, block_mig_state.read_done,
            block_mig_state.transferred);
}

static int64_t get_remaining_dirty(void)
{
    BlkMigDevState *bmds;
    int64_t dirty = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        dirty += bdrv_get_dirty_count(bmds->bs);
    }

    return dirty * BLOCK_SIZE;
}

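/* Stage 2 is finished when the bulk pass is done and the remaining
 * dirty data could be transferred within the configured maximum
 * downtime at the read bandwidth measured so far. */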
static int is_stage2_completed(void)
{
    int64_t remaining_dirty;
    long double bwidth;

    if (block_mig_state.bulk_completed == 1) {

        remaining_dirty = get_remaining_dirty();
        if (remaining_dirty == 0) {
            return 1;
        }

        bwidth = compute_read_bwidth();

        if ((remaining_dirty / bwidth) <=
            migrate_max_downtime()) {
            /* finish stage2 because we think that we can finish remaining work
               below max_downtime */

            return 1;
        }
    }

    return 0;
}

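/* Disable dirty tracking and release everything: per-device state,
 * drive references, in-use flags, and any completed blocks that were
 * never sent. */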
static void blk_mig_cleanup(Monitor *mon)
{
    BlkMigDevState *bmds;
    BlkMigBlock *blk;

    set_dirty_tracking(0);

    while ((bmds = QSIMPLEQ_FIRST(&block_mig_state.bmds_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.bmds_list, entry);
        bdrv_set_in_use(bmds->bs, 0);
        drive_put_ref(drive_get_by_blockdev(bmds->bs));
        g_free(bmds->aio_bitmap);
        g_free(bmds);
    }

    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
        g_free(blk->buf);
        g_free(blk);
    }

    monitor_printf(mon, "\n");
}

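/* Live-save handler.  Stage 1 registers the devices and enables dirty
 * tracking; stage 2 is called repeatedly and interleaves bulk and
 * dirty chunks while staying under the bandwidth limit; stage 3 is the
 * final pass, flushing all remaining dirty blocks with synchronous
 * reads; a negative stage means the migration was cancelled. */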
static int block_save_live(Monitor *mon, QEMUFile *f, int stage, void *opaque)
{
    int ret;

    DPRINTF("Enter save live stage %d submitted %d transferred %d\n",
            stage, block_mig_state.submitted, block_mig_state.transferred);

    if (stage < 0) {
        blk_mig_cleanup(mon);
        return 0;
    }

    if (block_mig_state.blk_enable != 1) {
        /* no need to migrate storage */
        qemu_put_be64(f, BLK_MIG_FLAG_EOS);
        return 1;
    }

    if (stage == 1) {
        init_blk_migration(mon, f);

        /* start track dirty blocks */
        set_dirty_tracking(1);
    }

    flush_blks(f);

    ret = qemu_file_get_error(f);
    if (ret) {
        blk_mig_cleanup(mon);
        return ret;
    }

    blk_mig_reset_dirty_cursor();

    if (stage == 2) {
        /* control the rate of transfer */
        while ((block_mig_state.submitted +
                block_mig_state.read_done) * BLOCK_SIZE <
               qemu_file_get_rate_limit(f)) {
            if (block_mig_state.bulk_completed == 0) {
                /* first finish the bulk phase */
                if (blk_mig_save_bulked_block(mon, f) == 0) {
                    /* finished saving bulk on all devices */
                    block_mig_state.bulk_completed = 1;
                }
            } else {
                if (blk_mig_save_dirty_block(mon, f, 1) == 0) {
                    /* no more dirty blocks */
                    break;
                }
            }
        }

        flush_blks(f);

        ret = qemu_file_get_error(f);
        if (ret) {
            blk_mig_cleanup(mon);
            return ret;
        }
    }

    if (stage == 3) {
        /* we know for sure that save bulk is completed and
           all async read completed */
        assert(block_mig_state.submitted == 0);

        while (blk_mig_save_dirty_block(mon, f, 0) != 0);
        blk_mig_cleanup(mon);

        /* report completion */
        qemu_put_be64(f, (100 << BDRV_SECTOR_BITS) | BLK_MIG_FLAG_PROGRESS);

        ret = qemu_file_get_error(f);
        if (ret) {
            return ret;
        }

        monitor_printf(mon, "Block migration completed\n");
    }

    qemu_put_be64(f, BLK_MIG_FLAG_EOS);

    return ((stage == 2) && is_stage2_completed());
}

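/* Destination side: replay the stream, writing each DEVICE_BLOCK
 * payload to the named device until EOS is seen. */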
static int block_load(QEMUFile *f, void *opaque, int version_id)
{
    static int banner_printed;
    int len, flags;
    char device_name[256];
    int64_t addr;
    BlockDriverState *bs, *bs_prev = NULL;
    uint8_t *buf;
    int64_t total_sectors = 0;
    int nr_sectors;
    int ret;

    do {
        addr = qemu_get_be64(f);

        flags = addr & ~BDRV_SECTOR_MASK;
        addr >>= BDRV_SECTOR_BITS;

        if (flags & BLK_MIG_FLAG_DEVICE_BLOCK) {
            /* get device name */
            len = qemu_get_byte(f);
            qemu_get_buffer(f, (uint8_t *)device_name, len);
            device_name[len] = '\0';

            bs = bdrv_find(device_name);
            if (!bs) {
                fprintf(stderr, "Error unknown block device %s\n",
                        device_name);
                return -EINVAL;
            }

            if (bs != bs_prev) {
                bs_prev = bs;
                total_sectors = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;
                if (total_sectors <= 0) {
                    error_report("Error getting length of block device %s",
                                 device_name);
                    return -EINVAL;
                }
            }

            if (total_sectors - addr < BDRV_SECTORS_PER_DIRTY_CHUNK) {
                nr_sectors = total_sectors - addr;
            } else {
                nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
            }

            buf = g_malloc(BLOCK_SIZE);

            qemu_get_buffer(f, buf, BLOCK_SIZE);
            ret = bdrv_write(bs, addr, buf, nr_sectors);

            g_free(buf);
            if (ret < 0) {
                return ret;
            }
        } else if (flags & BLK_MIG_FLAG_PROGRESS) {
            if (!banner_printed) {
                printf("Receiving block device images\n");
                banner_printed = 1;
            }
            printf("Completed %d %%%c", (int)addr,
                   (addr == 100) ? '\n' : '\r');
            fflush(stdout);
        } else if (!(flags & BLK_MIG_FLAG_EOS)) {
            fprintf(stderr, "Unknown flags\n");
            return -EINVAL;
        }
        ret = qemu_file_get_error(f);
        if (ret != 0) {
            return ret;
        }
    } while (!(flags & BLK_MIG_FLAG_EOS));

    return 0;
}

static void block_set_params(int blk_enable, int shared_base, void *opaque)
{
    block_mig_state.blk_enable = blk_enable;
    block_mig_state.shared_base = shared_base;

    /* shared base means that blk_enable = 1 */
    block_mig_state.blk_enable |= shared_base;
}

void blk_mig_init(void)
{
    QSIMPLEQ_INIT(&block_mig_state.bmds_list);
    QSIMPLEQ_INIT(&block_mig_state.blk_list);

    register_savevm_live(NULL, "block", 0, 1, block_set_params,
                         block_save_live, NULL, block_load, &block_mig_state);
}