/*
 * QEMU live block migration
 *
 * Copyright IBM, Corp. 2009
 *
 * Authors:
 *  Liran Schour   <lirans@il.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "qemu-common.h"
#include "block_int.h"
#include "hw/hw.h"
#include "qemu-queue.h"
#include "qemu-timer.h"
#include "monitor.h"
#include "block-migration.h"
#include "migration.h"
#include <assert.h>

#define BLOCK_SIZE (BDRV_SECTORS_PER_DIRTY_CHUNK << BDRV_SECTOR_BITS)

#define BLK_MIG_FLAG_DEVICE_BLOCK       0x01
#define BLK_MIG_FLAG_EOS                0x02
#define BLK_MIG_FLAG_PROGRESS           0x04

#define MAX_IS_ALLOCATED_SEARCH 65536

//#define DEBUG_BLK_MIGRATION

#ifdef DEBUG_BLK_MIGRATION
#define DPRINTF(fmt, ...) \
    do { printf("blk_migration: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

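/* Per-device migration state: cursors for the bulk and dirty phases,
 * progress counters, and a bitmap of chunks with AIO reads in flight. */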
typedef struct BlkMigDevState {
    BlockDriverState *bs;
    int bulk_completed;
    int shared_base;
    int64_t cur_sector;
    int64_t cur_dirty;
    int64_t completed_sectors;
    int64_t total_sectors;
    int64_t dirty;
    QSIMPLEQ_ENTRY(BlkMigDevState) entry;
    unsigned long *aio_bitmap;
} BlkMigDevState;

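/* One block in transit: the data buffer, its source device and sector
 * range, the iovec handed to the AIO layer, and the read result. */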
typedef struct BlkMigBlock {
    uint8_t *buf;
    BlkMigDevState *bmds;
    int64_t sector;
    int nr_sectors;
    struct iovec iov;
    QEMUIOVector qiov;
    BlockDriverAIOCB *aiocb;
    int ret;
    int64_t time;
    QSIMPLEQ_ENTRY(BlkMigBlock) entry;
} BlkMigBlock;

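/* Global migration state: the device list, the queue of completed but
 * unsent reads, transfer counters, and read-time statistics used for
 * bandwidth estimation. */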
typedef struct BlkMigState {
    int blk_enable;
    int shared_base;
    QSIMPLEQ_HEAD(bmds_list, BlkMigDevState) bmds_list;
    QSIMPLEQ_HEAD(blk_list, BlkMigBlock) blk_list;
    int submitted;
    int read_done;
    int transferred;
    int64_t total_sector_sum;
    int prev_progress;
    int bulk_completed;
    long double total_time;
    int reads;
} BlkMigState;

static BlkMigState block_mig_state;

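/* Write one block to the migration stream: a be64 word carrying the
 * sector number and flags, a length-prefixed device name, then
 * BLOCK_SIZE bytes of data. */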
static void blk_send(QEMUFile *f, BlkMigBlock *blk)
{
    int len;

    /* sector number and flags */
    qemu_put_be64(f, (blk->sector << BDRV_SECTOR_BITS)
                     | BLK_MIG_FLAG_DEVICE_BLOCK);

    /* device name */
    len = strlen(blk->bmds->bs->device_name);
    qemu_put_byte(f, len);
    qemu_put_buffer(f, (uint8_t *)blk->bmds->bs->device_name, len);

    qemu_put_buffer(f, blk->buf, BLOCK_SIZE);
}

int blk_mig_active(void)
{
    return !QSIMPLEQ_EMPTY(&block_mig_state.bmds_list);
}

uint64_t blk_mig_bytes_transferred(void)
{
    BlkMigDevState *bmds;
    uint64_t sum = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        sum += bmds->completed_sectors;
    }
    return sum << BDRV_SECTOR_BITS;
}

uint64_t blk_mig_bytes_remaining(void)
{
    return blk_mig_bytes_total() - blk_mig_bytes_transferred();
}

uint64_t blk_mig_bytes_total(void)
{
    BlkMigDevState *bmds;
    uint64_t sum = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        sum += bmds->total_sectors;
    }
    return sum << BDRV_SECTOR_BITS;
}

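/* Accumulate per-read service times (rt_clock, nanoseconds) so that
 * compute_read_bwidth() can report achieved read bandwidth in bytes
 * per nanosecond. */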
static inline void add_avg_read_time(int64_t time)
{
    block_mig_state.reads++;
    block_mig_state.total_time += time;
}

static inline long double compute_read_bwidth(void)
{
    assert(block_mig_state.total_time != 0);
    return (block_mig_state.reads * BLOCK_SIZE) / block_mig_state.total_time;
}

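/* Test whether the dirty chunk containing 'sector' has an AIO read in
 * flight. */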
static int bmds_aio_inflight(BlkMigDevState *bmds, int64_t sector)
{
    int64_t chunk = sector / (int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK;

    if (bmds->aio_bitmap &&
        (sector << BDRV_SECTOR_BITS) < bdrv_getlength(bmds->bs)) {
        return !!(bmds->aio_bitmap[chunk / (sizeof(unsigned long) * 8)] &
            (1UL << (chunk % (sizeof(unsigned long) * 8))));
    } else {
        return 0;
    }
}

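/* Set or clear the in-flight bit of every chunk touched by the range
 * [sector_num, sector_num + nb_sectors). */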
static void bmds_set_aio_inflight(BlkMigDevState *bmds, int64_t sector_num,
                                  int nb_sectors, int set)
{
    int64_t start, end;
    unsigned long val, idx, bit;

    start = sector_num / BDRV_SECTORS_PER_DIRTY_CHUNK;
    end = (sector_num + nb_sectors - 1) / BDRV_SECTORS_PER_DIRTY_CHUNK;

    for (; start <= end; start++) {
        idx = start / (sizeof(unsigned long) * 8);
        bit = start % (sizeof(unsigned long) * 8);
        val = bmds->aio_bitmap[idx];
        if (set) {
            if (!(val & (1UL << bit))) {
                val |= 1UL << bit;
            }
        } else {
            if (val & (1UL << bit)) {
                val &= ~(1UL << bit);
            }
        }
        bmds->aio_bitmap[idx] = val;
    }
}

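/* Allocate a zeroed in-flight bitmap with one bit per dirty chunk,
 * rounded up to whole bytes. */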
static void alloc_aio_bitmap(BlkMigDevState *bmds)
{
    BlockDriverState *bs = bmds->bs;
    int64_t bitmap_size;

    bitmap_size = (bdrv_getlength(bs) >> BDRV_SECTOR_BITS) +
            BDRV_SECTORS_PER_DIRTY_CHUNK * 8 - 1;
    bitmap_size /= BDRV_SECTORS_PER_DIRTY_CHUNK * 8;

    bmds->aio_bitmap = qemu_mallocz(bitmap_size);
}

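/* AIO completion callback: record the read time, queue the block for
 * sending, and clear its in-flight bit. */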
static void blk_mig_read_cb(void *opaque, int ret)
{
    BlkMigBlock *blk = opaque;

    blk->ret = ret;

    blk->time = qemu_get_clock_ns(rt_clock) - blk->time;

    add_avg_read_time(blk->time);

    QSIMPLEQ_INSERT_TAIL(&block_mig_state.blk_list, blk, entry);
    bmds_set_aio_inflight(blk->bmds, blk->sector, blk->nr_sectors, 0);

    block_mig_state.submitted--;
    block_mig_state.read_done++;
    assert(block_mig_state.submitted >= 0);
}

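/* Submit the read for the next bulk chunk of one device.  With a
 * shared base image, unallocated extents are skipped first.  Returns 1
 * once the device's bulk transfer is complete. */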
static int mig_save_device_bulk(Monitor *mon, QEMUFile *f,
                                BlkMigDevState *bmds)
{
    int64_t total_sectors = bmds->total_sectors;
    int64_t cur_sector = bmds->cur_sector;
    BlockDriverState *bs = bmds->bs;
    BlkMigBlock *blk;
    int nr_sectors;

    if (bmds->shared_base) {
        while (cur_sector < total_sectors &&
               !bdrv_is_allocated(bs, cur_sector, MAX_IS_ALLOCATED_SEARCH,
                                  &nr_sectors)) {
            cur_sector += nr_sectors;
        }
    }

    if (cur_sector >= total_sectors) {
        bmds->cur_sector = bmds->completed_sectors = total_sectors;
        return 1;
    }

    bmds->completed_sectors = cur_sector;

    cur_sector &= ~((int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK - 1);

    /* we are going to transfer a full block even if it is not allocated */
    nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;

    if (total_sectors - cur_sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
        nr_sectors = total_sectors - cur_sector;
    }

    blk = qemu_malloc(sizeof(BlkMigBlock));
    blk->buf = qemu_malloc(BLOCK_SIZE);
    blk->bmds = bmds;
    blk->sector = cur_sector;
    blk->nr_sectors = nr_sectors;

    blk->iov.iov_base = blk->buf;
    blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
    qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);

    blk->time = qemu_get_clock_ns(rt_clock);

    blk->aiocb = bdrv_aio_readv(bs, cur_sector, &blk->qiov,
                                nr_sectors, blk_mig_read_cb, blk);
    if (!blk->aiocb) {
        goto error;
    }
    block_mig_state.submitted++;

    bdrv_reset_dirty(bs, cur_sector, nr_sectors);
    bmds->cur_sector = cur_sector + nr_sectors;

    return (bmds->cur_sector >= total_sectors);

error:
    monitor_printf(mon, "Error reading sector %" PRId64 "\n", cur_sector);
    qemu_file_set_error(f);
    qemu_free(blk->buf);
    qemu_free(blk);
    return 0;
}

static void set_dirty_tracking(int enable)
{
    BlkMigDevState *bmds;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        bdrv_set_dirty_tracking(bmds->bs, enable);
    }
}

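/* bdrv_iterate() callback: register every writable, non-empty block
 * device with a fresh BlkMigDevState. */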
static void init_blk_migration_it(void *opaque, BlockDriverState *bs)
{
    Monitor *mon = opaque;
    BlkMigDevState *bmds;
    int64_t sectors;

    if (!bdrv_is_read_only(bs)) {
        sectors = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;
        if (sectors <= 0) {
            return;
        }

        bmds = qemu_mallocz(sizeof(BlkMigDevState));
        bmds->bs = bs;
        bmds->bulk_completed = 0;
        bmds->total_sectors = sectors;
        bmds->completed_sectors = 0;
        bmds->shared_base = block_mig_state.shared_base;
        alloc_aio_bitmap(bmds);

        block_mig_state.total_sector_sum += sectors;

        if (bmds->shared_base) {
            monitor_printf(mon, "Start migration for %s with shared base "
                                "image\n",
                           bs->device_name);
        } else {
            monitor_printf(mon, "Start full migration for %s\n",
                           bs->device_name);
        }

        QSIMPLEQ_INSERT_TAIL(&block_mig_state.bmds_list, bmds, entry);
    }
}

static void init_blk_migration(Monitor *mon, QEMUFile *f)
{
    block_mig_state.submitted = 0;
    block_mig_state.read_done = 0;
    block_mig_state.transferred = 0;
    block_mig_state.total_sector_sum = 0;
    block_mig_state.prev_progress = -1;
    block_mig_state.bulk_completed = 0;
    block_mig_state.total_time = 0;
    block_mig_state.reads = 0;

    bdrv_iterate(init_blk_migration_it, mon);
}

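/* Issue one bulk read on the first device whose bulk phase is not yet
 * complete, and emit a progress record whenever the overall percentage
 * changes.  Returns 0 once every device has finished its bulk phase. */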
static int blk_mig_save_bulked_block(Monitor *mon, QEMUFile *f)
{
    int64_t completed_sector_sum = 0;
    BlkMigDevState *bmds;
    int progress;
    int ret = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        if (bmds->bulk_completed == 0) {
            if (mig_save_device_bulk(mon, f, bmds) == 1) {
                /* completed bulk section for this device */
                bmds->bulk_completed = 1;
            }
            completed_sector_sum += bmds->completed_sectors;
            ret = 1;
            break;
        } else {
            completed_sector_sum += bmds->completed_sectors;
        }
    }

    progress = completed_sector_sum * 100 / block_mig_state.total_sector_sum;
    if (progress != block_mig_state.prev_progress) {
        block_mig_state.prev_progress = progress;
        qemu_put_be64(f, (progress << BDRV_SECTOR_BITS)
                         | BLK_MIG_FLAG_PROGRESS);
        monitor_printf(mon, "Completed %d %%\r", progress);
        monitor_flush(mon);
    }

    return ret;
}

static void blk_mig_reset_dirty_cursor(void)
{
    BlkMigDevState *bmds;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        bmds->cur_dirty = 0;
    }
}

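/* Send at most one dirty chunk of 'bmds': asynchronously during the
 * iterative stage, synchronously in the final stage.  Returns 1 when
 * the dirty cursor has reached the end of the device. */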
static int mig_save_device_dirty(Monitor *mon, QEMUFile *f,
                                 BlkMigDevState *bmds, int is_async)
{
    BlkMigBlock *blk;
    int64_t total_sectors = bmds->total_sectors;
    int64_t sector;
    int nr_sectors;

    for (sector = bmds->cur_dirty; sector < bmds->total_sectors;) {
        if (bmds_aio_inflight(bmds, sector)) {
            qemu_aio_flush();
        }
        if (bdrv_get_dirty(bmds->bs, sector)) {

            if (total_sectors - sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
                nr_sectors = total_sectors - sector;
            } else {
                nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
            }
            blk = qemu_malloc(sizeof(BlkMigBlock));
            blk->buf = qemu_malloc(BLOCK_SIZE);
            blk->bmds = bmds;
            blk->sector = sector;
            blk->nr_sectors = nr_sectors;

            if (is_async) {
                blk->iov.iov_base = blk->buf;
                blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
                qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);

                blk->time = qemu_get_clock_ns(rt_clock);

                blk->aiocb = bdrv_aio_readv(bmds->bs, sector, &blk->qiov,
                                            nr_sectors, blk_mig_read_cb, blk);
                if (!blk->aiocb) {
                    goto error;
                }
                block_mig_state.submitted++;
                bmds_set_aio_inflight(bmds, sector, nr_sectors, 1);
            } else {
                if (bdrv_read(bmds->bs, sector, blk->buf, nr_sectors) < 0) {
                    goto error;
                }
                blk_send(f, blk);

                qemu_free(blk->buf);
                qemu_free(blk);
            }

            bdrv_reset_dirty(bmds->bs, sector, nr_sectors);
            break;
        }
        sector += BDRV_SECTORS_PER_DIRTY_CHUNK;
        bmds->cur_dirty = sector;
    }

    return (bmds->cur_dirty >= bmds->total_sectors);

error:
    monitor_printf(mon, "Error reading sector %" PRId64 "\n", sector);
    qemu_file_set_error(f);
    qemu_free(blk->buf);
    qemu_free(blk);
    return 0;
}

static int blk_mig_save_dirty_block(Monitor *mon, QEMUFile *f, int is_async)
{
    BlkMigDevState *bmds;
    int ret = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        if (mig_save_device_dirty(mon, f, bmds, is_async) == 0) {
            ret = 1;
            break;
        }
    }

    return ret;
}

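/* Drain completed reads into the migration stream, stopping at the
 * rate limit and propagating any read error to the QEMUFile. */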
static void flush_blks(QEMUFile *f)
{
    BlkMigBlock *blk;

    DPRINTF("%s Enter submitted %d read_done %d transferred %d\n",
            __FUNCTION__, block_mig_state.submitted, block_mig_state.read_done,
            block_mig_state.transferred);

    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
        if (qemu_file_rate_limit(f)) {
            break;
        }
        if (blk->ret < 0) {
            qemu_file_set_error(f);
            break;
        }
        blk_send(f, blk);

        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
        qemu_free(blk->buf);
        qemu_free(blk);

        block_mig_state.read_done--;
        block_mig_state.transferred++;
        assert(block_mig_state.read_done >= 0);
    }

    DPRINTF("%s Exit submitted %d read_done %d transferred %d\n", __FUNCTION__,
            block_mig_state.submitted, block_mig_state.read_done,
            block_mig_state.transferred);
}

static int64_t get_remaining_dirty(void)
{
    BlkMigDevState *bmds;
    int64_t dirty = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        dirty += bdrv_get_dirty_count(bmds->bs);
    }

    return dirty * BLOCK_SIZE;
}

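/* Stage 2 is complete when the bulk phase has finished and the
 * remaining dirty data can be read within max_downtime at the measured
 * read bandwidth. */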
static int is_stage2_completed(void)
{
    int64_t remaining_dirty;
    long double bwidth;

    if (block_mig_state.bulk_completed == 1) {

        remaining_dirty = get_remaining_dirty();
        if (remaining_dirty == 0) {
            return 1;
        }

        bwidth = compute_read_bwidth();

        if ((remaining_dirty / bwidth) <=
            migrate_max_downtime()) {
            /* finish stage2 because we think that we can finish remaining
               work below max_downtime */

            return 1;
        }
    }

    return 0;
}

static void blk_mig_cleanup(Monitor *mon)
{
    BlkMigDevState *bmds;
    BlkMigBlock *blk;

    while ((bmds = QSIMPLEQ_FIRST(&block_mig_state.bmds_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.bmds_list, entry);
        qemu_free(bmds->aio_bitmap);
        qemu_free(bmds);
    }

    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
        qemu_free(blk->buf);
        qemu_free(blk);
    }

    set_dirty_tracking(0);

    monitor_printf(mon, "\n");
}

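/* Live-savevm handler.  Stage 1 registers devices and enables dirty
 * tracking, stage 2 iterates bulk and dirty transfers under the rate
 * limit, stage 3 synchronously flushes the remaining dirty blocks. */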
static int block_save_live(Monitor *mon, QEMUFile *f, int stage, void *opaque)
{
    DPRINTF("Enter save live stage %d submitted %d transferred %d\n",
            stage, block_mig_state.submitted, block_mig_state.transferred);

    if (stage < 0) {
        blk_mig_cleanup(mon);
        return 0;
    }

    if (block_mig_state.blk_enable != 1) {
        /* no need to migrate storage */
        qemu_put_be64(f, BLK_MIG_FLAG_EOS);
        return 1;
    }

    if (stage == 1) {
        init_blk_migration(mon, f);

        /* start tracking dirty blocks */
        set_dirty_tracking(1);
    }

    flush_blks(f);

    if (qemu_file_has_error(f)) {
        blk_mig_cleanup(mon);
        return 0;
    }

    blk_mig_reset_dirty_cursor();

    if (stage == 2) {
        /* control the rate of transfer */
        while ((block_mig_state.submitted +
                block_mig_state.read_done) * BLOCK_SIZE <
               qemu_file_get_rate_limit(f)) {
            if (block_mig_state.bulk_completed == 0) {
                /* first finish the bulk phase */
                if (blk_mig_save_bulked_block(mon, f) == 0) {
                    /* finished saving bulk on all devices */
                    block_mig_state.bulk_completed = 1;
                }
            } else {
                if (blk_mig_save_dirty_block(mon, f, 1) == 0) {
                    /* no more dirty blocks */
                    break;
                }
            }
        }

        flush_blks(f);

        if (qemu_file_has_error(f)) {
            blk_mig_cleanup(mon);
            return 0;
        }
    }

    if (stage == 3) {
        /* we know for sure that save bulk is completed and
           all async reads completed */
        assert(block_mig_state.submitted == 0);

        while (blk_mig_save_dirty_block(mon, f, 0) != 0);
        blk_mig_cleanup(mon);

        /* report completion */
        qemu_put_be64(f, (100 << BDRV_SECTOR_BITS) | BLK_MIG_FLAG_PROGRESS);

        if (qemu_file_has_error(f)) {
            return 0;
        }

        monitor_printf(mon, "Block migration completed\n");
    }

    qemu_put_be64(f, BLK_MIG_FLAG_EOS);

    return ((stage == 2) && is_stage2_completed());
}

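/* Destination side: decode sector/flags headers and replay device
 * blocks until the EOS flag is seen. */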
static int block_load(QEMUFile *f, void *opaque, int version_id)
{
    static int banner_printed;
    int len, flags;
    char device_name[256];
    int64_t addr;
    BlockDriverState *bs;
    uint8_t *buf;

    do {
        addr = qemu_get_be64(f);

        flags = addr & ~BDRV_SECTOR_MASK;
        addr >>= BDRV_SECTOR_BITS;

        if (flags & BLK_MIG_FLAG_DEVICE_BLOCK) {
            int ret;
            /* get device name */
            len = qemu_get_byte(f);
            qemu_get_buffer(f, (uint8_t *)device_name, len);
            device_name[len] = '\0';

            bs = bdrv_find(device_name);
            if (!bs) {
                fprintf(stderr, "Error unknown block device %s\n",
                        device_name);
                return -EINVAL;
            }

            buf = qemu_malloc(BLOCK_SIZE);

            qemu_get_buffer(f, buf, BLOCK_SIZE);
            ret = bdrv_write(bs, addr, buf, BDRV_SECTORS_PER_DIRTY_CHUNK);

            qemu_free(buf);
            if (ret < 0) {
                return ret;
            }
        } else if (flags & BLK_MIG_FLAG_PROGRESS) {
            if (!banner_printed) {
                printf("Receiving block device images\n");
                banner_printed = 1;
            }
            printf("Completed %d %%%c", (int)addr,
                   (addr == 100) ? '\n' : '\r');
            fflush(stdout);
        } else if (!(flags & BLK_MIG_FLAG_EOS)) {
            fprintf(stderr, "Unknown flags\n");
            return -EINVAL;
        }
        if (qemu_file_has_error(f)) {
            return -EIO;
        }
    } while (!(flags & BLK_MIG_FLAG_EOS));

    return 0;
}

static void block_set_params(int blk_enable, int shared_base, void *opaque)
{
    block_mig_state.blk_enable = blk_enable;
    block_mig_state.shared_base = shared_base;

    /* shared base means that blk_enable = 1 */
    block_mig_state.blk_enable |= shared_base;
}

void blk_mig_init(void)
{
    QSIMPLEQ_INIT(&block_mig_state.bmds_list);
    QSIMPLEQ_INIT(&block_mig_state.blk_list);

    register_savevm_live(NULL, "block", 0, 1, block_set_params,
                         block_save_live, NULL, block_load, &block_mig_state);
}