Revision 889ae39c

--- a/block-migration.c
+++ b/block-migration.c
@@ -15,8 +15,10 @@
 #include "block_int.h"
 #include "hw/hw.h"
 #include "qemu-queue.h"
+#include "qemu-timer.h"
 #include "monitor.h"
 #include "block-migration.h"
+#include "migration.h"
 #include <assert.h>
 
 #define BLOCK_SIZE (BDRV_SECTORS_PER_DIRTY_CHUNK << BDRV_SECTOR_BITS)
@@ -57,6 +59,7 @@
     QEMUIOVector qiov;
     BlockDriverAIOCB *aiocb;
     int ret;
+    int64_t time;
     QSIMPLEQ_ENTRY(BlkMigBlock) entry;
 } BlkMigBlock;
 
@@ -71,7 +74,8 @@
     int64_t total_sector_sum;
     int prev_progress;
     int bulk_completed;
-    int dirty_iterations;
+    long double total_time;
+    int reads;
 } BlkMigState;
 
 static BlkMigState block_mig_state;
@@ -124,12 +128,28 @@
     return sum << BDRV_SECTOR_BITS;
 }
 
+static inline void add_avg_read_time(int64_t time)
+{
+    block_mig_state.reads++;
+    block_mig_state.total_time += time;
+}
+
+static inline long double compute_read_bwidth(void)
+{
+    assert(block_mig_state.total_time != 0);
+    return (block_mig_state.reads * BLOCK_SIZE) / block_mig_state.total_time;
+}
+
 static void blk_mig_read_cb(void *opaque, int ret)
 {
     BlkMigBlock *blk = opaque;
 
     blk->ret = ret;
 
+    blk->time = qemu_get_clock_ns(rt_clock) - blk->time;
+
+    add_avg_read_time(blk->time);
+
     QSIMPLEQ_INSERT_TAIL(&block_mig_state.blk_list, blk, entry);
 
     block_mig_state.submitted--;
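
The two helpers above, together with the timestamp handling in blk_mig_read_cb() and in the submission paths below, form a running read-bandwidth estimator: every AIO read is stamped when it is issued, the completion callback turns that stamp into a latency, and compute_read_bwidth() divides the total bytes read by the total time spent reading, giving bytes per nanosecond. A minimal standalone sketch of the same accounting (the est_* names and the hard-coded 1 MiB chunk are illustrative, not QEMU's API):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define CHUNK_SIZE (1 << 20)   /* stands in for BLOCK_SIZE (1 MiB) */

static struct {
    int reads;                 /* completed reads */
    long double total_time;    /* summed read latency, in ns */
} est;

static void est_account_read(int64_t latency_ns)
{
    est.reads++;
    est.total_time += latency_ns;
}

/* bytes per nanosecond, the same units compute_read_bwidth() produces */
static long double est_bwidth(void)
{
    assert(est.total_time != 0);
    return (est.reads * (long double)CHUNK_SIZE) / est.total_time;
}

int main(void)
{
    /* pretend three 1 MiB reads took 4, 5 and 6 ms */
    est_account_read(4000000);
    est_account_read(5000000);
    est_account_read(6000000);
    printf("%.4Lf bytes/ns\n", est_bwidth());   /* 0.2097, i.e. ~200 MiB/s */
    return 0;
}

Note the cast before the multiplication: in the patch itself, block_mig_state.reads * BLOCK_SIZE is an int-by-int product, which would overflow once roughly 2 GiB worth of chunks (2048 reads) have been accounted.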
@@ -179,6 +199,8 @@
     blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
     qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);
 
+    blk->time = qemu_get_clock_ns(rt_clock);
+
     blk->aiocb = bdrv_aio_readv(bs, cur_sector, &blk->qiov,
                                 nr_sectors, blk_mig_read_cb, blk);
     if (!blk->aiocb) {
@@ -220,6 +242,8 @@
     block_mig_state.total_sector_sum = 0;
     block_mig_state.prev_progress = -1;
     block_mig_state.bulk_completed = 0;
+    block_mig_state.total_time = 0;
+    block_mig_state.reads = 0;
 
     for (bs = bdrv_first; bs != NULL; bs = bs->next) {
         if (bs->type == BDRV_TYPE_HD) {
@@ -314,11 +338,13 @@
             blk->bmds = bmds;
             blk->sector = sector;
 
-            if(is_async) {
+            if (is_async) {
                 blk->iov.iov_base = blk->buf;
                 blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
                 qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);
 
+                blk->time = qemu_get_clock_ns(rt_clock);
+
                 blk->aiocb = bdrv_aio_readv(bmds->bs, sector, &blk->qiov,
                                             nr_sectors, blk_mig_read_cb, blk);
                 if (!blk->aiocb) {
@@ -345,7 +371,7 @@
 
     return (bmds->cur_dirty >= bmds->total_sectors);
 
- error:
+error:
     monitor_printf(mon, "Error reading sector %" PRId64 "\n", sector);
     qemu_file_set_error(f);
     qemu_free(blk->buf);
@@ -359,7 +385,7 @@
     int ret = 0;
 
     QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
-        if(mig_save_device_dirty(mon, f, bmds, is_async) == 0) {
+        if (mig_save_device_dirty(mon, f, bmds, is_async) == 0) {
             ret = 1;
             break;
         }
@@ -400,9 +426,42 @@
             block_mig_state.transferred);
 }
 
+static int64_t get_remaining_dirty(void)
+{
+    BlkMigDevState *bmds;
+    int64_t dirty = 0;
+
+    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
+        dirty += bdrv_get_dirty_count(bmds->bs);
+    }
+
+    return dirty * BLOCK_SIZE;
+}
+
 static int is_stage2_completed(void)
 {
-    return (block_mig_state.submitted == 0 && block_mig_state.bulk_completed);
+    int64_t remaining_dirty;
+    long double bwidth;
+
+    if (block_mig_state.bulk_completed == 1) {
+
+        remaining_dirty = get_remaining_dirty();
+        if (remaining_dirty == 0) {
+            return 1;
+        }
+
+        bwidth = compute_read_bwidth();
+
+        if ((remaining_dirty / bwidth) <=
+            migrate_max_downtime()) {
+            /* finish stage2 because we think that we can finish remaining
+               work below max_downtime */
+
+            return 1;
+        }
+    }
+
+    return 0;
 }
 
 static void blk_mig_cleanup(Monitor *mon)
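
The stage-2 exit test now estimates the downtime that stopping the guest would cost. The units line up because blk->time is taken with qemu_get_clock_ns(): compute_read_bwidth() is in bytes per nanosecond, remaining_dirty is in bytes, so remaining_dirty / bwidth is an estimate in nanoseconds, which this patch compares against migrate_max_downtime() on the assumption that the migration layer keeps that limit in nanoseconds as well. As a worked example, with a measured bandwidth of 0.21 bytes/ns (about 200 MiB/s) and a 30 ms downtime budget: 512 MiB of outstanding dirty chunks gives 536870912 / 0.21 ≈ 2.5 s, so stage 2 keeps iterating; once the dirty set shrinks to about 6 MiB the estimate falls to 6291456 / 0.21 ≈ 30 ms and stage 2 is allowed to complete.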
@@ -458,7 +517,7 @@
 
     blk_mig_reset_dirty_cursor();
 
-    if(stage == 2) {
+    if (stage == 2) {
        /* control the rate of transfer */
        while ((block_mig_state.submitted +
                block_mig_state.read_done) * BLOCK_SIZE <
@@ -466,7 +525,7 @@
             if (block_mig_state.bulk_completed == 0) {
                 /* first finish the bulk phase */
                 if (blk_mig_save_bulked_block(mon, f) == 0) {
-                    /* finish saving bulk on all devices */
+                    /* finished saving bulk on all devices */
                     block_mig_state.bulk_completed = 1;
                 }
             } else {
@@ -486,9 +545,11 @@
     }
 
     if (stage == 3) {
-        /* we know for sure that save bulk is completed */
+        /* we know for sure that save bulk is completed and
+           all async reads have completed */
+        assert(block_mig_state.submitted == 0);
 
-        while(blk_mig_save_dirty_block(mon, f, 0) != 0);
+        while (blk_mig_save_dirty_block(mon, f, 0) != 0);
         blk_mig_cleanup(mon);
 
         /* report completion */
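
Putting the pieces together, the new convergence rule is: after the bulk phase, keep iterating in stage 2 while remaining_dirty / bwidth exceeds the downtime budget; stage 3 then drains the last dirty blocks synchronously with the guest stopped. A self-contained sketch of the stage-2 decision (the stage2_done() helper and its parameters are hypothetical; QEMU reads these values from block_mig_state and the migration layer):

#include <stdint.h>
#include <stdio.h>

/* All times in nanoseconds, matching qemu_get_clock_ns(). */
static int stage2_done(int64_t dirty_bytes, long double bwidth_bytes_per_ns,
                       int64_t max_downtime_ns, int bulk_completed)
{
    if (!bulk_completed) {
        return 0;               /* bulk phase must finish first */
    }
    if (dirty_bytes == 0) {
        return 1;               /* nothing left to copy */
    }
    /* estimated time to flush the rest at the measured read rate */
    return (dirty_bytes / bwidth_bytes_per_ns) <= max_downtime_ns;
}

int main(void)
{
    long double bwidth = 0.21L; /* ~200 MiB/s measured read bandwidth */
    int64_t budget = 30000000;  /* 30 ms downtime budget */

    printf("%d\n", stage2_done(512 << 20, bwidth, budget, 1)); /* 0: ~2.5 s */
    printf("%d\n", stage2_done(6 << 20, bwidth, budget, 1));   /* 1: ~30 ms */
    return 0;
}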
