root / migration.c @ be7172e2
History | View | Annotate | Download (18.8 kB)
1 |
/*
|
---|---|
2 |
* QEMU live migration
|
3 |
*
|
4 |
* Copyright IBM, Corp. 2008
|
5 |
*
|
6 |
* Authors:
|
7 |
* Anthony Liguori <aliguori@us.ibm.com>
|
8 |
*
|
9 |
* This work is licensed under the terms of the GNU GPL, version 2. See
|
10 |
* the COPYING file in the top-level directory.
|
11 |
*
|
12 |
* Contributions after 2012-01-13 are licensed under the terms of the
|
13 |
* GNU GPL, version 2 or (at your option) any later version.
|
14 |
*/
|
15 |
|
16 |
#include "qemu-common.h" |
17 |
#include "migration/migration.h" |
18 |
#include "monitor/monitor.h" |
19 |
#include "migration/qemu-file.h" |
20 |
#include "sysemu/sysemu.h" |
21 |
#include "block/block.h" |
22 |
#include "qemu/sockets.h" |
23 |
#include "migration/block.h" |
24 |
#include "qemu/thread.h" |
25 |
#include "qmp-commands.h" |
26 |
#include "trace.h" |
27 |
|
28 |
//#define DEBUG_MIGRATION

/* Debug tracing: uncomment DEBUG_MIGRATION above to get "migration: "-prefixed
 * printf output; otherwise DPRINTF compiles to a no-op do/while(0). */
#ifdef DEBUG_MIGRATION
#define DPRINTF(fmt, ...) \
    do { printf("migration: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif
|
37 |
|
38 |
/* Internal migration state machine.  qmp_query_migrate() maps these to the
 * user-visible status strings ("active", "completed", ...). */
enum {
    MIG_STATE_ERROR,     /* migration failed */
    MIG_STATE_SETUP,     /* initial state; no migration started yet */
    MIG_STATE_CANCELLED, /* cancelled by the user */
    MIG_STATE_ACTIVE,    /* outgoing migration in progress */
    MIG_STATE_COMPLETED, /* finished successfully */
};

#define MAX_THROTTLE  (32 << 20)      /* Migration speed throttling */

/* Amount of time to allocate to each "chunk" of bandwidth-throttled
 * data. */
#define BUFFER_DELAY     100
#define XFER_LIMIT_RATIO (1000 / BUFFER_DELAY)

/* Migration XBZRLE default cache size */
#define DEFAULT_MIGRATE_CACHE_SIZE (64 * 1024 * 1024)
55 |
|
56 |
/* Listeners registered via add_migration_state_change_notifier(); notified
 * with the MigrationState pointer on every state transition. */
static NotifierList migration_state_notifiers =
    NOTIFIER_LIST_INITIALIZER(migration_state_notifiers);

/* When we add fault tolerance, we could have several
   migrations at once.  For now we don't need to add
   dynamic creation of migration */
63 |
MigrationState *migrate_get_current(void)
|
64 |
{ |
65 |
static MigrationState current_migration = {
|
66 |
.state = MIG_STATE_SETUP, |
67 |
.bandwidth_limit = MAX_THROTTLE, |
68 |
.xbzrle_cache_size = DEFAULT_MIGRATE_CACHE_SIZE, |
69 |
}; |
70 |
|
71 |
return ¤t_migration;
|
72 |
} |
73 |
|
74 |
void qemu_start_incoming_migration(const char *uri, Error **errp) |
75 |
{ |
76 |
const char *p; |
77 |
|
78 |
if (strstart(uri, "tcp:", &p)) |
79 |
tcp_start_incoming_migration(p, errp); |
80 |
#if !defined(WIN32)
|
81 |
else if (strstart(uri, "exec:", &p)) |
82 |
exec_start_incoming_migration(p, errp); |
83 |
else if (strstart(uri, "unix:", &p)) |
84 |
unix_start_incoming_migration(p, errp); |
85 |
else if (strstart(uri, "fd:", &p)) |
86 |
fd_start_incoming_migration(p, errp); |
87 |
#endif
|
88 |
else {
|
89 |
error_setg(errp, "unknown migration protocol: %s", uri);
|
90 |
} |
91 |
} |
92 |
|
93 |
static void process_incoming_migration_co(void *opaque) |
94 |
{ |
95 |
QEMUFile *f = opaque; |
96 |
int ret;
|
97 |
|
98 |
ret = qemu_loadvm_state(f); |
99 |
qemu_fclose(f); |
100 |
if (ret < 0) { |
101 |
fprintf(stderr, "load of migration failed\n");
|
102 |
exit(0);
|
103 |
} |
104 |
qemu_announce_self(); |
105 |
DPRINTF("successfully loaded vm state\n");
|
106 |
|
107 |
bdrv_clear_incoming_migration_all(); |
108 |
/* Make sure all file formats flush their mutable metadata */
|
109 |
bdrv_invalidate_cache_all(); |
110 |
|
111 |
if (autostart) {
|
112 |
vm_start(); |
113 |
} else {
|
114 |
runstate_set(RUN_STATE_PAUSED); |
115 |
} |
116 |
} |
117 |
|
118 |
/* Kick off the incoming-migration coroutine on @f.  The underlying fd is
 * switched to non-blocking so the coroutine can yield on I/O instead of
 * stalling the main loop. */
void process_incoming_migration(QEMUFile *f)
{
    Coroutine *co = qemu_coroutine_create(process_incoming_migration_co);
    int fd = qemu_get_fd(f);

    /* All supported incoming transports are fd-backed at this point. */
    assert(fd != -1);
    socket_set_nonblock(fd);
    qemu_coroutine_enter(co, f);
}
127 |
|
128 |
/* amount of nanoseconds we are willing to wait for migration to be down.
 * the choice of nanoseconds is because it is the maximum resolution that
 * get_clock() can achieve. It is an internal measure. All user-visible
 * units must be in seconds */
static uint64_t max_downtime = 30000000;  /* default: 30 ms */
133 |
|
134 |
/* Accessor for the tolerated downtime, in nanoseconds (see max_downtime). */
uint64_t migrate_max_downtime(void)
{
    return max_downtime;
}
138 |
|
139 |
MigrationCapabilityStatusList *qmp_query_migrate_capabilities(Error **errp) |
140 |
{ |
141 |
MigrationCapabilityStatusList *head = NULL;
|
142 |
MigrationCapabilityStatusList *caps; |
143 |
MigrationState *s = migrate_get_current(); |
144 |
int i;
|
145 |
|
146 |
for (i = 0; i < MIGRATION_CAPABILITY_MAX; i++) { |
147 |
if (head == NULL) { |
148 |
head = g_malloc0(sizeof(*caps));
|
149 |
caps = head; |
150 |
} else {
|
151 |
caps->next = g_malloc0(sizeof(*caps));
|
152 |
caps = caps->next; |
153 |
} |
154 |
caps->value = |
155 |
g_malloc(sizeof(*caps->value));
|
156 |
caps->value->capability = i; |
157 |
caps->value->state = s->enabled_capabilities[i]; |
158 |
} |
159 |
|
160 |
return head;
|
161 |
} |
162 |
|
163 |
/*
 * Fill in the XBZRLE statistics of @info, but only when the XBZRLE
 * capability is enabled; otherwise leave info->has_xbzrle_cache false.
 */
static void get_xbzrle_cache_stats(MigrationInfo *info)
{
    if (!migrate_use_xbzrle()) {
        return;
    }

    info->has_xbzrle_cache = true;
    info->xbzrle_cache = g_malloc0(sizeof(*info->xbzrle_cache));
    info->xbzrle_cache->cache_size = migrate_xbzrle_cache_size();
    info->xbzrle_cache->bytes = xbzrle_mig_bytes_transferred();
    info->xbzrle_cache->pages = xbzrle_mig_pages_transferred();
    info->xbzrle_cache->cache_miss = xbzrle_mig_pages_cache_miss();
    info->xbzrle_cache->overflow = xbzrle_mig_pages_overflow();
}
175 |
|
176 |
MigrationInfo *qmp_query_migrate(Error **errp) |
177 |
{ |
178 |
MigrationInfo *info = g_malloc0(sizeof(*info));
|
179 |
MigrationState *s = migrate_get_current(); |
180 |
|
181 |
switch (s->state) {
|
182 |
case MIG_STATE_SETUP:
|
183 |
/* no migration has happened ever */
|
184 |
break;
|
185 |
case MIG_STATE_ACTIVE:
|
186 |
info->has_status = true;
|
187 |
info->status = g_strdup("active");
|
188 |
info->has_total_time = true;
|
189 |
info->total_time = qemu_get_clock_ms(rt_clock) |
190 |
- s->total_time; |
191 |
info->has_expected_downtime = true;
|
192 |
info->expected_downtime = s->expected_downtime; |
193 |
|
194 |
info->has_ram = true;
|
195 |
info->ram = g_malloc0(sizeof(*info->ram));
|
196 |
info->ram->transferred = ram_bytes_transferred(); |
197 |
info->ram->remaining = ram_bytes_remaining(); |
198 |
info->ram->total = ram_bytes_total(); |
199 |
info->ram->duplicate = dup_mig_pages_transferred(); |
200 |
info->ram->normal = norm_mig_pages_transferred(); |
201 |
info->ram->normal_bytes = norm_mig_bytes_transferred(); |
202 |
info->ram->dirty_pages_rate = s->dirty_pages_rate; |
203 |
|
204 |
|
205 |
if (blk_mig_active()) {
|
206 |
info->has_disk = true;
|
207 |
info->disk = g_malloc0(sizeof(*info->disk));
|
208 |
info->disk->transferred = blk_mig_bytes_transferred(); |
209 |
info->disk->remaining = blk_mig_bytes_remaining(); |
210 |
info->disk->total = blk_mig_bytes_total(); |
211 |
} |
212 |
|
213 |
get_xbzrle_cache_stats(info); |
214 |
break;
|
215 |
case MIG_STATE_COMPLETED:
|
216 |
get_xbzrle_cache_stats(info); |
217 |
|
218 |
info->has_status = true;
|
219 |
info->status = g_strdup("completed");
|
220 |
info->total_time = s->total_time; |
221 |
info->has_downtime = true;
|
222 |
info->downtime = s->downtime; |
223 |
|
224 |
info->has_ram = true;
|
225 |
info->ram = g_malloc0(sizeof(*info->ram));
|
226 |
info->ram->transferred = ram_bytes_transferred(); |
227 |
info->ram->remaining = 0;
|
228 |
info->ram->total = ram_bytes_total(); |
229 |
info->ram->duplicate = dup_mig_pages_transferred(); |
230 |
info->ram->normal = norm_mig_pages_transferred(); |
231 |
info->ram->normal_bytes = norm_mig_bytes_transferred(); |
232 |
break;
|
233 |
case MIG_STATE_ERROR:
|
234 |
info->has_status = true;
|
235 |
info->status = g_strdup("failed");
|
236 |
break;
|
237 |
case MIG_STATE_CANCELLED:
|
238 |
info->has_status = true;
|
239 |
info->status = g_strdup("cancelled");
|
240 |
break;
|
241 |
} |
242 |
|
243 |
return info;
|
244 |
} |
245 |
|
246 |
/* QMP handler: toggle migration capabilities.  Refused while a migration is
 * in flight; capabilities not named in @params keep their previous state. */
void qmp_migrate_set_capabilities(MigrationCapabilityStatusList *params,
                                  Error **errp)
{
    MigrationState *s = migrate_get_current();
    MigrationCapabilityStatusList *cap;

    if (s->state == MIG_STATE_ACTIVE) {
        error_set(errp, QERR_MIGRATION_ACTIVE);
        return;
    }

    for (cap = params; cap; cap = cap->next) {
        s->enabled_capabilities[cap->value->capability] = cap->value->state;
    }
}
261 |
|
262 |
/* shared migration helpers */
|
263 |
|
264 |
/* Bottom half, scheduled by the migration thread on exit and run in the main
 * loop: tear down the migration file, cancel a non-completed savevm run, and
 * notify state-change listeners.  Order matters: closing s->file joins the
 * migration thread (via migration_file_ops.close -> migration_close). */
static void migrate_fd_cleanup(void *opaque)
{
    MigrationState *s = opaque;

    qemu_bh_delete(s->cleanup_bh);
    s->cleanup_bh = NULL;

    if (s->file) {
        DPRINTF("closing file\n");
        qemu_fclose(s->file);
        s->file = NULL;
    }

    /* migration_close() must have released the transport file and the
     * thread must have left the ACTIVE state by now. */
    assert(s->migration_file == NULL);
    assert(s->state != MIG_STATE_ACTIVE);

    if (s->state != MIG_STATE_COMPLETED) {
        qemu_savevm_state_cancel();
    }

    notifier_list_notify(&migration_state_notifiers, s);
}
286 |
|
287 |
/*
 * Atomically move @s from MIG_STATE_ACTIVE to @new_state; a no-op if the
 * state already changed (e.g. the user cancelled while we were completing).
 * Emits the state-change trace event only when the transition happened.
 */
static void migrate_finish_set_state(MigrationState *s, int new_state)
{
    /* Fix: __sync_val_compare_and_swap() returns the value that was in
     * s->state BEFORE the exchange, so a successful swap returns
     * MIG_STATE_ACTIVE.  The original compared against new_state, which is
     * true only when the CAS failed because someone else had already set
     * that state - i.e. the trace fired exactly on the wrong path. */
    if (__sync_val_compare_and_swap(&s->state, MIG_STATE_ACTIVE,
                                    new_state) == MIG_STATE_ACTIVE) {
        trace_migrate_set_state(new_state);
    }
}
294 |
|
295 |
/* Mark the migration as failed before the transport was ever attached
 * (s->file must still be NULL) and notify listeners. */
void migrate_fd_error(MigrationState *s)
{
    DPRINTF("setting error state\n");
    assert(s->file == NULL);
    s->state = MIG_STATE_ERROR;
    trace_migrate_set_state(MIG_STATE_ERROR);
    notifier_list_notify(&migration_state_notifiers, s);
}
303 |
|
304 |
/* Request cancellation: atomically flips ACTIVE -> CANCELLED; the migration
 * thread observes the state change and winds down. */
static void migrate_fd_cancel(MigrationState *s)
{
    DPRINTF("cancelling migration\n");

    migrate_finish_set_state(s, MIG_STATE_CANCELLED);
}
310 |
|
311 |
/*
 * Close the underlying transport file, if any.  Returns qemu_fclose()'s
 * result, or 0 when there was nothing to close.  Idempotent.
 */
int migrate_fd_close(MigrationState *s)
{
    int rc = 0;

    if (s->migration_file != NULL) {
        rc = qemu_fclose(s->migration_file);
        s->migration_file = NULL;
    }

    return rc;
}
320 |
|
321 |
/* Subscribe @notify to migration state changes. */
void add_migration_state_change_notifier(Notifier *notify)
{
    notifier_list_add(&migration_state_notifiers, notify);
}
325 |
|
326 |
/* Unsubscribe @notify; it must have been registered via
 * add_migration_state_change_notifier(). */
void remove_migration_state_change_notifier(Notifier *notify)
{
    notifier_remove(notify);
}
330 |
|
331 |
/* True while an outgoing migration is in progress. */
bool migration_is_active(MigrationState *s)
{
    return s->state == MIG_STATE_ACTIVE;
}
335 |
|
336 |
/* True once the migration completed successfully. */
bool migration_has_finished(MigrationState *s)
{
    return s->state == MIG_STATE_COMPLETED;
}
340 |
|
341 |
/* True when the migration ended without success: user cancellation counts
 * as failure for the purposes of this predicate. */
bool migration_has_failed(MigrationState *s)
{
    return (s->state == MIG_STATE_CANCELLED ||
            s->state == MIG_STATE_ERROR);
}
346 |
|
347 |
static MigrationState *migrate_init(const MigrationParams *params) |
348 |
{ |
349 |
MigrationState *s = migrate_get_current(); |
350 |
int64_t bandwidth_limit = s->bandwidth_limit; |
351 |
bool enabled_capabilities[MIGRATION_CAPABILITY_MAX];
|
352 |
int64_t xbzrle_cache_size = s->xbzrle_cache_size; |
353 |
|
354 |
memcpy(enabled_capabilities, s->enabled_capabilities, |
355 |
sizeof(enabled_capabilities));
|
356 |
|
357 |
memset(s, 0, sizeof(*s)); |
358 |
s->bandwidth_limit = bandwidth_limit; |
359 |
s->params = *params; |
360 |
memcpy(s->enabled_capabilities, enabled_capabilities, |
361 |
sizeof(enabled_capabilities));
|
362 |
s->xbzrle_cache_size = xbzrle_cache_size; |
363 |
|
364 |
s->bandwidth_limit = bandwidth_limit; |
365 |
s->state = MIG_STATE_SETUP; |
366 |
trace_migrate_set_state(MIG_STATE_SETUP); |
367 |
|
368 |
s->total_time = qemu_get_clock_ms(rt_clock); |
369 |
return s;
|
370 |
} |
371 |
|
372 |
/* Errors registered by devices/subsystems that cannot be migrated; a
 * non-empty list makes qmp_migrate() refuse to start. */
static GSList *migration_blockers;
|
373 |
|
374 |
/* Register @reason as a migration blocker.  The caller keeps ownership of
 * the Error and must remove it with migrate_del_blocker(). */
void migrate_add_blocker(Error *reason)
{
    migration_blockers = g_slist_prepend(migration_blockers, reason);
}
378 |
|
379 |
/* Remove a previously registered migration blocker. */
void migrate_del_blocker(Error *reason)
{
    migration_blockers = g_slist_remove(migration_blockers, reason);
}
383 |
|
384 |
void qmp_migrate(const char *uri, bool has_blk, bool blk, |
385 |
bool has_inc, bool inc, bool has_detach, bool detach, |
386 |
Error **errp) |
387 |
{ |
388 |
Error *local_err = NULL;
|
389 |
MigrationState *s = migrate_get_current(); |
390 |
MigrationParams params; |
391 |
const char *p; |
392 |
|
393 |
params.blk = blk; |
394 |
params.shared = inc; |
395 |
|
396 |
if (s->state == MIG_STATE_ACTIVE) {
|
397 |
error_set(errp, QERR_MIGRATION_ACTIVE); |
398 |
return;
|
399 |
} |
400 |
|
401 |
if (qemu_savevm_state_blocked(errp)) {
|
402 |
return;
|
403 |
} |
404 |
|
405 |
if (migration_blockers) {
|
406 |
*errp = error_copy(migration_blockers->data); |
407 |
return;
|
408 |
} |
409 |
|
410 |
s = migrate_init(¶ms); |
411 |
|
412 |
if (strstart(uri, "tcp:", &p)) { |
413 |
tcp_start_outgoing_migration(s, p, &local_err); |
414 |
#if !defined(WIN32)
|
415 |
} else if (strstart(uri, "exec:", &p)) { |
416 |
exec_start_outgoing_migration(s, p, &local_err); |
417 |
} else if (strstart(uri, "unix:", &p)) { |
418 |
unix_start_outgoing_migration(s, p, &local_err); |
419 |
} else if (strstart(uri, "fd:", &p)) { |
420 |
fd_start_outgoing_migration(s, p, &local_err); |
421 |
#endif
|
422 |
} else {
|
423 |
error_set(errp, QERR_INVALID_PARAMETER_VALUE, "uri", "a valid migration protocol"); |
424 |
return;
|
425 |
} |
426 |
|
427 |
if (local_err) {
|
428 |
migrate_fd_error(s); |
429 |
error_propagate(errp, local_err); |
430 |
return;
|
431 |
} |
432 |
} |
433 |
|
434 |
/* QMP handler: cancel the current outgoing migration (no-op if none). */
void qmp_migrate_cancel(Error **errp)
{
    migrate_fd_cancel(migrate_get_current());
}
438 |
|
439 |
/* QMP handler: resize the XBZRLE page cache to @value bytes.  Rejects
 * values that would be truncated when stored in a size_t on this host. */
void qmp_migrate_set_cache_size(int64_t value, Error **errp)
{
    MigrationState *s = migrate_get_current();

    /* Check for truncation */
    if (value != (size_t)value) {
        error_set(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                  "exceeding address space");
        return;
    }

    /* NOTE(review): the stored size is whatever xbzrle_cache_resize()
     * returns - presumably the size actually applied (e.g. rounded);
     * confirm against its implementation. */
    s->xbzrle_cache_size = xbzrle_cache_resize(value);
}
452 |
|
453 |
/* QMP handler: return the current XBZRLE cache size in bytes. */
int64_t qmp_query_migrate_cache_size(Error **errp)
{
    return migrate_xbzrle_cache_size();
}
457 |
|
458 |
/*
 * QMP handler: set the outgoing bandwidth limit to @value bytes/second.
 * Negative values are clamped to 0.  Applies immediately to an in-progress
 * migration.
 */
void qmp_migrate_set_speed(int64_t value, Error **errp)
{
    MigrationState *s;

    if (value < 0) {
        value = 0;
    }

    s = migrate_get_current();
    s->bandwidth_limit = value;
    /* Fix: guard against NULL - s->file is only set while a migration is
     * running (migrate_fd_connect), so setting the speed beforehand
     * dereferenced a NULL QEMUFile inside qemu_file_set_rate_limit(). */
    if (s->file) {
        qemu_file_set_rate_limit(s->file, s->bandwidth_limit);
    }
}
470 |
|
471 |
void qmp_migrate_set_downtime(double value, Error **errp) |
472 |
{ |
473 |
value *= 1e9;
|
474 |
value = MAX(0, MIN(UINT64_MAX, value));
|
475 |
max_downtime = (uint64_t)value; |
476 |
} |
477 |
|
478 |
/* Whether the XBZRLE capability is currently enabled (0 or 1). */
int migrate_use_xbzrle(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_XBZRLE];
}
486 |
|
487 |
/* Configured XBZRLE cache size, in bytes. */
int64_t migrate_xbzrle_cache_size(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->xbzrle_cache_size;
}
495 |
|
496 |
/* migration thread support */
|
497 |
|
498 |
static int migration_put_buffer(void *opaque, const uint8_t *buf, |
499 |
int64_t pos, int size)
|
500 |
{ |
501 |
MigrationState *s = opaque; |
502 |
int ret;
|
503 |
|
504 |
DPRINTF("putting %d bytes at %" PRId64 "\n", size, pos); |
505 |
|
506 |
if (size <= 0) { |
507 |
return size;
|
508 |
} |
509 |
|
510 |
qemu_put_buffer(s->migration_file, buf, size); |
511 |
ret = qemu_file_get_error(s->migration_file); |
512 |
if (ret) {
|
513 |
return ret;
|
514 |
} |
515 |
|
516 |
s->bytes_xfer += size; |
517 |
return size;
|
518 |
} |
519 |
|
520 |
/* QEMUFileOps .close hook for the outgoing file.  Runs on the main thread
 * while it holds the iothread lock; the lock is dropped around the join so
 * the migration thread can take it itself while finishing up. */
static int migration_close(void *opaque)
{
    MigrationState *s = opaque;

    DPRINTF("closing\n");

    qemu_mutex_unlock_iothread();
    qemu_thread_join(&s->thread);
    qemu_mutex_lock_iothread();
    /* The joined thread always leaves ACTIVE before exiting. */
    assert(s->state != MIG_STATE_ACTIVE);

    return migrate_fd_close(s);
}
533 |
|
534 |
/* QEMUFileOps .get_fd hook: expose the transport's file descriptor. */
static int migration_get_fd(void *opaque)
{
    MigrationState *s = opaque;

    return qemu_get_fd(s->migration_file);
}
540 |
|
541 |
/*
|
542 |
* The meaning of the return values is:
|
543 |
* 0: We can continue sending
|
544 |
* 1: Time to stop
|
545 |
* negative: There has been an error
|
546 |
*/
|
547 |
static int migration_rate_limit(void *opaque) |
548 |
{ |
549 |
MigrationState *s = opaque; |
550 |
int ret;
|
551 |
|
552 |
ret = qemu_file_get_error(s->file); |
553 |
if (ret) {
|
554 |
return ret;
|
555 |
} |
556 |
|
557 |
if (s->bytes_xfer >= s->xfer_limit) {
|
558 |
return 1; |
559 |
} |
560 |
|
561 |
return 0; |
562 |
} |
563 |
|
564 |
static int64_t migration_set_rate_limit(void *opaque, int64_t new_rate) |
565 |
{ |
566 |
MigrationState *s = opaque; |
567 |
if (qemu_file_get_error(s->file)) {
|
568 |
goto out;
|
569 |
} |
570 |
if (new_rate > SIZE_MAX) {
|
571 |
new_rate = SIZE_MAX; |
572 |
} |
573 |
|
574 |
s->xfer_limit = new_rate / XFER_LIMIT_RATIO; |
575 |
|
576 |
out:
|
577 |
return s->xfer_limit;
|
578 |
} |
579 |
|
580 |
/* QEMUFileOps .get_rate_limit hook: current per-slice byte budget. */
static int64_t migration_get_rate_limit(void *opaque)
{
    MigrationState *s = opaque;

    return s->xfer_limit;
}
586 |
|
587 |
static void *migration_thread(void *opaque) |
588 |
{ |
589 |
MigrationState *s = opaque; |
590 |
int64_t initial_time = qemu_get_clock_ms(rt_clock); |
591 |
int64_t sleep_time = 0;
|
592 |
int64_t initial_bytes = 0;
|
593 |
int64_t max_size = 0;
|
594 |
int64_t start_time = initial_time; |
595 |
bool old_vm_running = false; |
596 |
|
597 |
DPRINTF("beginning savevm\n");
|
598 |
qemu_savevm_state_begin(s->file, &s->params); |
599 |
|
600 |
while (s->state == MIG_STATE_ACTIVE) {
|
601 |
int64_t current_time; |
602 |
uint64_t pending_size; |
603 |
|
604 |
if (!qemu_file_rate_limit(s->file)) {
|
605 |
DPRINTF("iterate\n");
|
606 |
pending_size = qemu_savevm_state_pending(s->file, max_size); |
607 |
DPRINTF("pending size %lu max %lu\n", pending_size, max_size);
|
608 |
if (pending_size && pending_size >= max_size) {
|
609 |
qemu_savevm_state_iterate(s->file); |
610 |
} else {
|
611 |
DPRINTF("done iterating\n");
|
612 |
qemu_mutex_lock_iothread(); |
613 |
start_time = qemu_get_clock_ms(rt_clock); |
614 |
qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER); |
615 |
old_vm_running = runstate_is_running(); |
616 |
vm_stop_force_state(RUN_STATE_FINISH_MIGRATE); |
617 |
s->xfer_limit = INT_MAX; |
618 |
qemu_savevm_state_complete(s->file); |
619 |
qemu_mutex_unlock_iothread(); |
620 |
if (!qemu_file_get_error(s->file)) {
|
621 |
migrate_finish_set_state(s, MIG_STATE_COMPLETED); |
622 |
break;
|
623 |
} |
624 |
} |
625 |
} |
626 |
|
627 |
if (qemu_file_get_error(s->file)) {
|
628 |
migrate_finish_set_state(s, MIG_STATE_ERROR); |
629 |
break;
|
630 |
} |
631 |
current_time = qemu_get_clock_ms(rt_clock); |
632 |
if (current_time >= initial_time + BUFFER_DELAY) {
|
633 |
uint64_t transferred_bytes = qemu_ftell(s->file) - initial_bytes; |
634 |
uint64_t time_spent = current_time - initial_time - sleep_time; |
635 |
double bandwidth = transferred_bytes / time_spent;
|
636 |
max_size = bandwidth * migrate_max_downtime() / 1000000;
|
637 |
|
638 |
DPRINTF("transferred %" PRIu64 " time_spent %" PRIu64 |
639 |
" bandwidth %g max_size %" PRId64 "\n", |
640 |
transferred_bytes, time_spent, bandwidth, max_size); |
641 |
/* if we haven't sent anything, we don't want to recalculate
|
642 |
10000 is a small enough number for our purposes */
|
643 |
if (s->dirty_bytes_rate && transferred_bytes > 10000) { |
644 |
s->expected_downtime = s->dirty_bytes_rate / bandwidth; |
645 |
} |
646 |
|
647 |
s->bytes_xfer = 0;
|
648 |
sleep_time = 0;
|
649 |
initial_time = current_time; |
650 |
initial_bytes = qemu_ftell(s->file); |
651 |
} |
652 |
if (qemu_file_rate_limit(s->file)) {
|
653 |
/* usleep expects microseconds */
|
654 |
g_usleep((initial_time + BUFFER_DELAY - current_time)*1000);
|
655 |
sleep_time += qemu_get_clock_ms(rt_clock) - current_time; |
656 |
} |
657 |
} |
658 |
|
659 |
qemu_mutex_lock_iothread(); |
660 |
if (s->state == MIG_STATE_COMPLETED) {
|
661 |
int64_t end_time = qemu_get_clock_ms(rt_clock); |
662 |
s->total_time = end_time - s->total_time; |
663 |
s->downtime = end_time - start_time; |
664 |
runstate_set(RUN_STATE_POSTMIGRATE); |
665 |
} else {
|
666 |
if (old_vm_running) {
|
667 |
vm_start(); |
668 |
} |
669 |
} |
670 |
qemu_bh_schedule(s->cleanup_bh); |
671 |
qemu_mutex_unlock_iothread(); |
672 |
|
673 |
return NULL; |
674 |
} |
675 |
|
676 |
/* Ops vtable wiring the outgoing QEMUFile to this module's rate-limited
 * writer (no .get_buffer: the outgoing side is write-only). */
static const QEMUFileOps migration_file_ops = {
    .get_fd =         migration_get_fd,
    .put_buffer =     migration_put_buffer,
    .close =          migration_close,
    .rate_limit =     migration_rate_limit,
    .get_rate_limit = migration_get_rate_limit,
    .set_rate_limit = migration_set_rate_limit,
};
684 |
|
685 |
/* Called once the transport is connected: switch to ACTIVE, set up the
 * rate-limited QEMUFile and cleanup bottom half, and launch the migration
 * thread.  Listeners are notified of the state change last. */
void migrate_fd_connect(MigrationState *s)
{
    s->state = MIG_STATE_ACTIVE;
    trace_migrate_set_state(MIG_STATE_ACTIVE);

    s->bytes_xfer = 0;
    /* This is a best 1st approximation. ns to ms */
    s->expected_downtime = max_downtime/1000000;

    /* Per-BUFFER_DELAY byte budget derived from the bytes/sec limit. */
    s->xfer_limit = s->bandwidth_limit / XFER_LIMIT_RATIO;

    s->cleanup_bh = qemu_bh_new(migrate_fd_cleanup, s);
    s->file = qemu_fopen_ops(s, &migration_file_ops);

    qemu_thread_create(&s->thread, migration_thread, s,
                       QEMU_THREAD_JOINABLE);
    notifier_list_notify(&migration_state_notifiers, s);
}