Revision d76cac7d

--- a/block-migration.c
+++ b/block-migration.c
@@ -42,6 +42,7 @@
     int bulk_completed;
     int shared_base;
     int64_t cur_sector;
+    int64_t cur_dirty;
     int64_t completed_sectors;
     int64_t total_sectors;
     int64_t dirty;
@@ -70,6 +71,7 @@
     int64_t total_sector_sum;
     int prev_progress;
     int bulk_completed;
+    int dirty_iterations;
 } BlkMigState;
 
 static BlkMigState block_mig_state;
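
The two added fields drive the new incremental dirty-block pass: each device records in cur_dirty where its last dirty scan stopped, so the next call can resume there instead of rescanning from sector 0. A minimal self-contained sketch of that resume-cursor pattern (demo_dev, save_one_dirty and the sizes are hypothetical stand-ins, not QEMU code):

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TOTAL_SECTORS 16

/* Stand-in for a migrating device: a dirty bitmap plus a resume
 * cursor, playing the role of BlkMigDevState's cur_dirty. */
struct demo_dev {
    bool dirty[TOTAL_SECTORS];
    int64_t cur_dirty;              /* next sector to examine */
};

/* Send at most one dirty sector, then remember where we stopped.
 * Returns 1 once the whole device has been scanned. */
static int save_one_dirty(struct demo_dev *dev)
{
    int64_t sector;

    for (sector = dev->cur_dirty; sector < TOTAL_SECTORS; sector++) {
        if (dev->dirty[sector]) {
            printf("sending sector %" PRId64 "\n", sector);
            dev->dirty[sector] = false;
            dev->cur_dirty = sector + 1;    /* resume point */
            return 0;
        }
    }
    dev->cur_dirty = TOTAL_SECTORS;
    return 1;
}

int main(void)
{
    struct demo_dev dev = { .cur_dirty = 0 };
    dev.dirty[3] = dev.dirty[9] = true;

    /* Each call makes bounded progress; between calls the caller is
     * free to do other work, e.g. rate limiting or flushing AIO. */
    while (!save_one_dirty(&dev)) {
        ;
    }
    return 0;
}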
@@ -183,6 +185,7 @@
         goto error;
     }
     block_mig_state.submitted++;
+
     bdrv_reset_dirty(bs, cur_sector, nr_sectors);
     bmds->cur_sector = cur_sector + nr_sectors;
 
@@ -281,39 +284,88 @@
     return ret;
 }
 
-#define MAX_NUM_BLOCKS 4
-
-static void blk_mig_save_dirty_blocks(Monitor *mon, QEMUFile *f)
+static void blk_mig_reset_dirty_cursor(void)
 {
     BlkMigDevState *bmds;
-    BlkMigBlock blk;
+
+    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
+        bmds->cur_dirty = 0;
+    }
+}
+
+static int mig_save_device_dirty(Monitor *mon, QEMUFile *f,
+                                 BlkMigDevState *bmds, int is_async)
+{
+    BlkMigBlock *blk;
+    int64_t total_sectors = bmds->total_sectors;
     int64_t sector;
+    int nr_sectors;
 
-    blk.buf = qemu_malloc(BLOCK_SIZE);
+    for (sector = bmds->cur_dirty; sector < bmds->total_sectors;) {
+        if (bdrv_get_dirty(bmds->bs, sector)) {
 
-    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
-        for (sector = 0; sector < bmds->cur_sector;) {
-            if (bdrv_get_dirty(bmds->bs, sector)) {
-                if (bdrv_read(bmds->bs, sector, blk.buf,
-                              BDRV_SECTORS_PER_DIRTY_CHUNK) < 0) {
-                    monitor_printf(mon, "Error reading sector %" PRId64 "\n",
-                                   sector);
-                    qemu_file_set_error(f);
-                    qemu_free(blk.buf);
-                    return;
+            if (total_sectors - sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
+                nr_sectors = total_sectors - sector;
+            } else {
+                nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
+            }
+            blk = qemu_malloc(sizeof(BlkMigBlock));
+            blk->buf = qemu_malloc(BLOCK_SIZE);
+            blk->bmds = bmds;
+            blk->sector = sector;
+
+            if(is_async) {
+                blk->iov.iov_base = blk->buf;
+                blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
+                qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);
+
+                blk->aiocb = bdrv_aio_readv(bmds->bs, sector, &blk->qiov,
+                                            nr_sectors, blk_mig_read_cb, blk);
+                if (!blk->aiocb) {
+                    goto error;
+                }
+                block_mig_state.submitted++;
+            } else {
+                if (bdrv_read(bmds->bs, sector, blk->buf,
+                              nr_sectors) < 0) {
+                    goto error;
                 }
-                blk.bmds = bmds;
-                blk.sector = sector;
-                blk_send(f, &blk);
+                blk_send(f, blk);
 
-                bdrv_reset_dirty(bmds->bs, sector,
-                                 BDRV_SECTORS_PER_DIRTY_CHUNK);
+                qemu_free(blk->buf);
+                qemu_free(blk);
             }
-            sector += BDRV_SECTORS_PER_DIRTY_CHUNK;
+
+            bdrv_reset_dirty(bmds->bs, sector, nr_sectors);
+            break;
         }
+        sector += BDRV_SECTORS_PER_DIRTY_CHUNK;
+        bmds->cur_dirty = sector;
     }
 
-    qemu_free(blk.buf);
+    return (bmds->cur_dirty >= bmds->total_sectors);
+
+ error:
+    monitor_printf(mon, "Error reading sector %" PRId64 "\n", sector);
+    qemu_file_set_error(f);
+    qemu_free(blk->buf);
+    qemu_free(blk);
+    return 0;
+}
+
+static int blk_mig_save_dirty_block(Monitor *mon, QEMUFile *f, int is_async)
+{
+    BlkMigDevState *bmds;
+    int ret = 0;
+
+    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
+        if(mig_save_device_dirty(mon, f, bmds, is_async) == 0) {
+            ret = 1;
+            break;
+        }
+    }
+
+    return ret;
 }
 
 static void flush_blks(QEMUFile* f)
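
Two details worth calling out in mig_save_device_dirty: it transfers at most one dirty chunk per call (the break after bdrv_reset_dirty), and it clamps the final chunk so a device whose size is not a multiple of the chunk size does not read past the end. A self-contained sketch of the clamping arithmetic, where CHUNK_SECTORS is an invented stand-in for BDRV_SECTORS_PER_DIRTY_CHUNK:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define CHUNK_SECTORS 8

/* Length of the chunk starting at 'sector': a full chunk, except at
 * the tail of the device, mirroring the nr_sectors computation above. */
static int64_t chunk_len(int64_t total_sectors, int64_t sector)
{
    if (total_sectors - sector < CHUNK_SECTORS) {
        return total_sectors - sector;
    }
    return CHUNK_SECTORS;
}

int main(void)
{
    int64_t total = 20;             /* deliberately not chunk-aligned */
    int64_t sector;

    for (sector = 0; sector < total; sector += CHUNK_SECTORS) {
        printf("sector %2" PRId64 ": chunk of %" PRId64 " sectors\n",
               sector, chunk_len(total, sector));
    }
    return 0;                       /* prints chunks of 8, 8 and 4 */
}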
@@ -404,28 +456,39 @@
         return 0;
     }
 
-    /* control the rate of transfer */
-    while ((block_mig_state.submitted +
-            block_mig_state.read_done) * BLOCK_SIZE <
-           qemu_file_get_rate_limit(f)) {
-        if (blk_mig_save_bulked_block(mon, f) == 0) {
-            /* finished saving bulk on all devices */
-            block_mig_state.bulk_completed = 1;
-            break;
+    blk_mig_reset_dirty_cursor();
+
+    if(stage == 2) {
+        /* control the rate of transfer */
+        while ((block_mig_state.submitted +
+                block_mig_state.read_done) * BLOCK_SIZE <
+               qemu_file_get_rate_limit(f)) {
+            if (block_mig_state.bulk_completed == 0) {
+                /* first finish the bulk phase */
+                if (blk_mig_save_bulked_block(mon, f) == 0) {
+                    /* finish saving bulk on all devices */
+                    block_mig_state.bulk_completed = 1;
+                }
+            } else {
+                if (blk_mig_save_dirty_block(mon, f, 1) == 0) {
+                    /* no more dirty blocks */
+                    break;
+                }
+            }
         }
-    }
 
-    flush_blks(f);
+        flush_blks(f);
 
-    if (qemu_file_has_error(f)) {
-        blk_mig_cleanup(mon);
-        return 0;
+        if (qemu_file_has_error(f)) {
+            blk_mig_cleanup(mon);
+            return 0;
+        }
     }
 
     if (stage == 3) {
         /* we know for sure that save bulk is completed */
 
-        blk_mig_save_dirty_blocks(mon, f);
+        while(blk_mig_save_dirty_block(mon, f, 0) != 0);
         blk_mig_cleanup(mon);
 
         /* report completion */
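
The stage-2 loop paces itself by counting bytes in flight: it keeps queueing block reads only while (submitted + read_done) * BLOCK_SIZE stays under the rate limit, finishing the bulk phase before starting on dirty blocks; stage 3 then drains the remaining dirty blocks synchronously. A toy model of that pacing loop, with invented sizes and a fake workload in place of the real save functions:

#include <stdio.h>

#define BLOCK_SIZE (1 << 20)        /* assumed 1 MiB per block */

static int submitted;               /* blocks queued this iteration */

/* Fake workload standing in for blk_mig_save_dirty_block(): pretend
 * ten dirty blocks exist, and "queue" one per call. */
static int save_one_block(void)
{
    static int remaining = 10;

    if (remaining == 0) {
        return 0;                   /* no more dirty blocks */
    }
    remaining--;
    submitted++;
    return 1;
}

int main(void)
{
    long rate_limit = 4L * BLOCK_SIZE;  /* per-iteration budget, assumed */
    int iterations = 0;
    int progress = 1;

    /* Each outer pass models one stage-2 call from the migration loop. */
    while (progress) {
        progress = 0;
        while ((long)submitted * BLOCK_SIZE < rate_limit) {
            if (!save_one_block()) {
                break;
            }
            progress = 1;
        }
        submitted = 0;              /* pretend the reads were flushed */
        iterations++;
    }
    printf("drained in %d iterations\n", iterations);
    return 0;
}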
