/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include <stdint.h>
#include <stdarg.h>
#include <stdlib.h>
#ifndef _WIN32
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include "config.h"
#include "monitor/monitor.h"
#include "sysemu/sysemu.h"
#include "qemu/bitops.h"
#include "qemu/bitmap.h"
#include "sysemu/arch_init.h"
#include "audio/audio.h"
#include "hw/pc.h"
#include "hw/pci/pci.h"
#include "hw/audiodev.h"
#include "sysemu/kvm.h"
#include "migration/migration.h"
#include "exec/gdbstub.h"
#include "hw/smbios.h"
#include "exec/address-spaces.h"
#include "hw/pcspk.h"
#include "migration/page_cache.h"
#include "qemu/config-file.h"
#include "qmp-commands.h"
#include "trace.h"
#include "exec/cpu-all.h"

#ifdef DEBUG_ARCH_INIT
#define DPRINTF(fmt, ...) \
    do { fprintf(stdout, "arch_init: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#ifdef TARGET_SPARC
int graphic_width = 1024;
int graphic_height = 768;
int graphic_depth = 8;
#else
int graphic_width = 800;
int graphic_height = 600;
int graphic_depth = 15;
#endif


#if defined(TARGET_ALPHA)
#define QEMU_ARCH QEMU_ARCH_ALPHA
#elif defined(TARGET_ARM)
#define QEMU_ARCH QEMU_ARCH_ARM
#elif defined(TARGET_CRIS)
#define QEMU_ARCH QEMU_ARCH_CRIS
#elif defined(TARGET_I386)
#define QEMU_ARCH QEMU_ARCH_I386
#elif defined(TARGET_M68K)
#define QEMU_ARCH QEMU_ARCH_M68K
#elif defined(TARGET_LM32)
#define QEMU_ARCH QEMU_ARCH_LM32
#elif defined(TARGET_MICROBLAZE)
#define QEMU_ARCH QEMU_ARCH_MICROBLAZE
#elif defined(TARGET_MIPS)
#define QEMU_ARCH QEMU_ARCH_MIPS
#elif defined(TARGET_OPENRISC)
#define QEMU_ARCH QEMU_ARCH_OPENRISC
#elif defined(TARGET_PPC)
#define QEMU_ARCH QEMU_ARCH_PPC
#elif defined(TARGET_S390X)
#define QEMU_ARCH QEMU_ARCH_S390X
#elif defined(TARGET_SH4)
#define QEMU_ARCH QEMU_ARCH_SH4
#elif defined(TARGET_SPARC)
#define QEMU_ARCH QEMU_ARCH_SPARC
#elif defined(TARGET_XTENSA)
#define QEMU_ARCH QEMU_ARCH_XTENSA
#elif defined(TARGET_UNICORE32)
#define QEMU_ARCH QEMU_ARCH_UNICORE32
#endif

const uint32_t arch_type = QEMU_ARCH;

/***********************************************************/
/* ram save/restore */

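/* These flags are stored in the low bits of each page's offset in the
 * migration stream (the offset itself is TARGET_PAGE_SIZE aligned, so those
 * bits are free); see save_block_hdr() and ram_load() below. */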
#define RAM_SAVE_FLAG_FULL     0x01 /* Obsolete, not used anymore */
#define RAM_SAVE_FLAG_COMPRESS 0x02
#define RAM_SAVE_FLAG_MEM_SIZE 0x04
#define RAM_SAVE_FLAG_PAGE     0x08
#define RAM_SAVE_FLAG_EOS      0x10
#define RAM_SAVE_FLAG_CONTINUE 0x20
#define RAM_SAVE_FLAG_XBZRLE   0x40

#ifdef __ALTIVEC__
#include <altivec.h>
#define VECTYPE        vector unsigned char
#define SPLAT(p)       vec_splat(vec_ld(0, p), 0)
#define ALL_EQ(v1, v2) vec_all_eq(v1, v2)
/* altivec.h may redefine the bool macro as vector type.
 * Reset it to POSIX semantics. */
#undef bool
#define bool _Bool
#elif defined __SSE2__
#include <emmintrin.h>
#define VECTYPE        __m128i
#define SPLAT(p)       _mm_set1_epi8(*(p))
#define ALL_EQ(v1, v2) (_mm_movemask_epi8(_mm_cmpeq_epi8(v1, v2)) == 0xFFFF)
#else
#define VECTYPE        unsigned long
#define SPLAT(p)       (*(p) * (~0UL / 255))
#define ALL_EQ(v1, v2) ((v1) == (v2))
#endif


static struct defconfig_file {
    const char *filename;
    /* Indicates it is a user config file (disabled by -no-user-config) */
    bool userconfig;
} default_config_files[] = {
    { CONFIG_QEMU_CONFDIR "/qemu.conf",                   true },
    { CONFIG_QEMU_CONFDIR "/target-" TARGET_ARCH ".conf", true },
    { NULL }, /* end of list */
};


int qemu_read_default_config_files(bool userconfig)
{
    int ret;
    struct defconfig_file *f;

    for (f = default_config_files; f->filename; f++) {
        if (!userconfig && f->userconfig) {
            continue;
        }
        ret = qemu_read_config_file(f->filename);
        if (ret < 0 && ret != -ENOENT) {
            return ret;
        }
    }

    return 0;
}

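/* Return 1 if the page consists of a single repeated byte value (including
 * all-zero pages), checked one VECTYPE-sized chunk at a time using the
 * SPLAT/ALL_EQ macros above.  Such pages are sent as a single byte with
 * RAM_SAVE_FLAG_COMPRESS instead of a full page. */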
static int is_dup_page(uint8_t *page)
{
    VECTYPE *p = (VECTYPE *)page;
    VECTYPE val = SPLAT(page);
    int i;

    for (i = 0; i < TARGET_PAGE_SIZE / sizeof(VECTYPE); i++) {
        if (!ALL_EQ(val, p[i])) {
            return 0;
        }
    }

    return 1;
}

/* struct contains XBZRLE cache and a static page
   used by the compression */
static struct {
    /* buffer used for XBZRLE encoding */
    uint8_t *encoded_buf;
    /* buffer for storing page content */
    uint8_t *current_buf;
    /* buffer used for XBZRLE decoding */
    uint8_t *decoded_buf;
    /* Cache for XBZRLE */
    PageCache *cache;
} XBZRLE = {
    .encoded_buf = NULL,
    .current_buf = NULL,
    .decoded_buf = NULL,
    .cache = NULL,
};


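/* Resize the XBZRLE page cache to new_size bytes and return the size that
 * is actually in effect.  The cache is managed in TARGET_PAGE_SIZE units;
 * if it has not been created yet, the request is simply rounded down to a
 * power of two. */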
int64_t xbzrle_cache_resize(int64_t new_size)
{
    if (XBZRLE.cache != NULL) {
        return cache_resize(XBZRLE.cache, new_size / TARGET_PAGE_SIZE) *
            TARGET_PAGE_SIZE;
    }
    return pow2floor(new_size);
}

/* accounting for migration statistics */
typedef struct AccountingInfo {
    uint64_t dup_pages;
    uint64_t norm_pages;
    uint64_t iterations;
    uint64_t xbzrle_bytes;
    uint64_t xbzrle_pages;
    uint64_t xbzrle_cache_miss;
    uint64_t xbzrle_overflows;
} AccountingInfo;

static AccountingInfo acct_info;

static void acct_clear(void)
{
    memset(&acct_info, 0, sizeof(acct_info));
}

uint64_t dup_mig_bytes_transferred(void)
{
    return acct_info.dup_pages * TARGET_PAGE_SIZE;
}

uint64_t dup_mig_pages_transferred(void)
{
    return acct_info.dup_pages;
}

uint64_t norm_mig_bytes_transferred(void)
{
    return acct_info.norm_pages * TARGET_PAGE_SIZE;
}

uint64_t norm_mig_pages_transferred(void)
{
    return acct_info.norm_pages;
}

uint64_t xbzrle_mig_bytes_transferred(void)
{
    return acct_info.xbzrle_bytes;
}

uint64_t xbzrle_mig_pages_transferred(void)
{
    return acct_info.xbzrle_pages;
}

uint64_t xbzrle_mig_pages_cache_miss(void)
{
    return acct_info.xbzrle_cache_miss;
}

uint64_t xbzrle_mig_pages_overflow(void)
{
    return acct_info.xbzrle_overflows;
}

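/* Write the header for one page to the stream: the page's offset within its
 * RAMBlock with the cont and flag bits OR-ed into the low bits.  Unless
 * RAM_SAVE_FLAG_CONTINUE is set (same block as the previous page), the
 * block's idstr follows as a length byte plus the string itself. */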
static void save_block_hdr(QEMUFile *f, RAMBlock *block, ram_addr_t offset,
                           int cont, int flag)
{
    qemu_put_be64(f, offset | cont | flag);
    if (!cont) {
        qemu_put_byte(f, strlen(block->idstr));
        qemu_put_buffer(f, (uint8_t *)block->idstr,
                        strlen(block->idstr));
    }
}

#define ENCODING_FLAG_XBZRLE 0x1

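/* Try to send the page at (block, offset) as an XBZRLE delta against the
 * copy held in the page cache.
 *
 * Returns: -1 on a cache miss or when the encoded delta would not fit in
 *              one page (the caller then falls back to a full page),
 *           0 if the page is identical to the cached copy (nothing sent),
 *          >0 the number of bytes written to the stream. */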
static int save_xbzrle_page(QEMUFile *f, uint8_t *current_data,
                            ram_addr_t current_addr, RAMBlock *block,
                            ram_addr_t offset, int cont, bool last_stage)
{
    int encoded_len = 0, bytes_sent = -1;
    uint8_t *prev_cached_page;

    if (!cache_is_cached(XBZRLE.cache, current_addr)) {
        if (!last_stage) {
            cache_insert(XBZRLE.cache, current_addr,
                         g_memdup(current_data, TARGET_PAGE_SIZE));
        }
        acct_info.xbzrle_cache_miss++;
        return -1;
    }

    prev_cached_page = get_cached_data(XBZRLE.cache, current_addr);

    /* save current buffer into memory */
    memcpy(XBZRLE.current_buf, current_data, TARGET_PAGE_SIZE);

    /* XBZRLE encoding (if there is no overflow) */
    encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
                                       TARGET_PAGE_SIZE, XBZRLE.encoded_buf,
                                       TARGET_PAGE_SIZE);
    if (encoded_len == 0) {
        DPRINTF("Skipping unmodified page\n");
        return 0;
    } else if (encoded_len == -1) {
        DPRINTF("Overflow\n");
        acct_info.xbzrle_overflows++;
        /* update data in the cache */
        memcpy(prev_cached_page, current_data, TARGET_PAGE_SIZE);
        return -1;
    }

    /* we need to update the data in the cache, in order to get the same data */
    if (!last_stage) {
        memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
    }

    /* Send XBZRLE based compressed page */
    save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_XBZRLE);
    qemu_put_byte(f, ENCODING_FLAG_XBZRLE);
    qemu_put_be16(f, encoded_len);
    qemu_put_buffer(f, XBZRLE.encoded_buf, encoded_len);
    bytes_sent = encoded_len + 1 + 2;
    acct_info.xbzrle_pages++;
    acct_info.xbzrle_bytes += bytes_sent;

    return bytes_sent;
}

static RAMBlock *last_block;
static ram_addr_t last_offset;
static unsigned long *migration_bitmap;
static uint64_t migration_dirty_pages;
static uint32_t last_version;

static inline bool migration_bitmap_test_and_reset_dirty(MemoryRegion *mr,
                                                         ram_addr_t offset)
{
    bool ret;
    int nr = (mr->ram_addr + offset) >> TARGET_PAGE_BITS;

    ret = test_and_clear_bit(nr, migration_bitmap);

    if (ret) {
        migration_dirty_pages--;
    }
    return ret;
}

static inline bool migration_bitmap_set_dirty(MemoryRegion *mr,
                                              ram_addr_t offset)
{
    bool ret;
    int nr = (mr->ram_addr + offset) >> TARGET_PAGE_BITS;

    ret = test_and_set_bit(nr, migration_bitmap);

    if (!ret) {
        migration_dirty_pages++;
    }
    return ret;
}

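/* Pull the dirty information accumulated by the memory API into the
 * migration bitmap: every page marked DIRTY_MEMORY_MIGRATION since the last
 * sync is set in migration_bitmap, then the memory API's record is reset.
 * When at least a second has passed since the last calculation, the observed
 * dirty-page rate is stored in s->dirty_pages_rate. */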
static void migration_bitmap_sync(void)
{
    RAMBlock *block;
    ram_addr_t addr;
    uint64_t num_dirty_pages_init = migration_dirty_pages;
    MigrationState *s = migrate_get_current();
    static int64_t start_time;
    static int64_t num_dirty_pages_period;
    int64_t end_time;

    if (!start_time) {
        start_time = qemu_get_clock_ms(rt_clock);
    }

    trace_migration_bitmap_sync_start();
    memory_global_sync_dirty_bitmap(get_system_memory());

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        for (addr = 0; addr < block->length; addr += TARGET_PAGE_SIZE) {
            if (memory_region_get_dirty(block->mr, addr, TARGET_PAGE_SIZE,
                                        DIRTY_MEMORY_MIGRATION)) {
                migration_bitmap_set_dirty(block->mr, addr);
            }
        }
        memory_region_reset_dirty(block->mr, 0, block->length,
                                  DIRTY_MEMORY_MIGRATION);
    }
    trace_migration_bitmap_sync_end(migration_dirty_pages
                                    - num_dirty_pages_init);
    num_dirty_pages_period += migration_dirty_pages - num_dirty_pages_init;
    end_time = qemu_get_clock_ms(rt_clock);

    /* more than 1 second = 1000 milliseconds */
    if (end_time > start_time + 1000) {
        s->dirty_pages_rate = num_dirty_pages_period * 1000
            / (end_time - start_time);
        start_time = end_time;
        num_dirty_pages_period = 0;
    }
}

/*
 * ram_save_block: Writes a page of memory to the stream f
 *
 * Returns:  0: if the page hasn't changed
 *          -1: if there are no more dirty pages
 *           n: the number of bytes written otherwise
 */

static int ram_save_block(QEMUFile *f, bool last_stage)
{
    RAMBlock *block = last_block;
    ram_addr_t offset = last_offset;
    int bytes_sent = -1;
    MemoryRegion *mr;
    ram_addr_t current_addr;

    if (!block) {
        block = QTAILQ_FIRST(&ram_list.blocks);
    }

    do {
        mr = block->mr;
        if (migration_bitmap_test_and_reset_dirty(mr, offset)) {
            uint8_t *p;
            int cont = (block == last_block) ? RAM_SAVE_FLAG_CONTINUE : 0;

            p = memory_region_get_ram_ptr(mr) + offset;

            if (is_dup_page(p)) {
                acct_info.dup_pages++;
                save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_COMPRESS);
                qemu_put_byte(f, *p);
                bytes_sent = 1;
            } else if (migrate_use_xbzrle()) {
                current_addr = block->offset + offset;
                bytes_sent = save_xbzrle_page(f, p, current_addr, block,
                                              offset, cont, last_stage);
                if (!last_stage) {
                    p = get_cached_data(XBZRLE.cache, current_addr);
                }
            }

            /* either we didn't send yet (we may have had XBZRLE overflow) */
            if (bytes_sent == -1) {
                save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_PAGE);
                qemu_put_buffer(f, p, TARGET_PAGE_SIZE);
                bytes_sent = TARGET_PAGE_SIZE;
                acct_info.norm_pages++;
            }

            /* if page is unmodified, continue to the next */
            if (bytes_sent != 0) {
                break;
            }
        }

        offset += TARGET_PAGE_SIZE;
        if (offset >= block->length) {
            offset = 0;
            block = QTAILQ_NEXT(block, next);
            if (!block) {
                block = QTAILQ_FIRST(&ram_list.blocks);
            }
        }
    } while (block != last_block || offset != last_offset);

    last_block = block;
    last_offset = offset;

    return bytes_sent;
}

static uint64_t bytes_transferred;

static ram_addr_t ram_save_remaining(void)
{
    return migration_dirty_pages;
}

uint64_t ram_bytes_remaining(void)
{
    return ram_save_remaining() * TARGET_PAGE_SIZE;
}

uint64_t ram_bytes_transferred(void)
{
    return bytes_transferred;
}

uint64_t ram_bytes_total(void)
{
    RAMBlock *block;
    uint64_t total = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        total += block->length;
    }

    return total;
}

static void migration_end(void)
{
    if (migration_bitmap) {
        memory_global_dirty_log_stop();
        g_free(migration_bitmap);
        migration_bitmap = NULL;
    }

    if (XBZRLE.cache) {
        cache_fini(XBZRLE.cache);
        g_free(XBZRLE.cache);
        g_free(XBZRLE.encoded_buf);
        g_free(XBZRLE.current_buf);
        g_free(XBZRLE.decoded_buf);
        XBZRLE.cache = NULL;
    }
}

static void ram_migration_cancel(void *opaque)
{
    migration_end();
}

static void reset_ram_globals(void)
{
    last_block = NULL;
    last_offset = 0;
    last_version = ram_list.version;
}

#define MAX_WAIT 50 /* ms, half buffered_file limit */

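/* Setup stage of RAM migration: mark every page dirty, enable dirty
 * logging, and write the total RAM size followed by the (idstr, length)
 * pair of every RAMBlock so the destination can validate its RAM layout. */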
static int ram_save_setup(QEMUFile *f, void *opaque)
{
    RAMBlock *block;
    int64_t ram_pages = last_ram_offset() >> TARGET_PAGE_BITS;

    migration_bitmap = bitmap_new(ram_pages);
    bitmap_set(migration_bitmap, 0, ram_pages);
    migration_dirty_pages = ram_pages;

    qemu_mutex_lock_ramlist();
    bytes_transferred = 0;
    reset_ram_globals();

    if (migrate_use_xbzrle()) {
        XBZRLE.cache = cache_init(migrate_xbzrle_cache_size() /
                                  TARGET_PAGE_SIZE,
                                  TARGET_PAGE_SIZE);
        if (!XBZRLE.cache) {
            DPRINTF("Error creating cache\n");
            return -1;
        }
        XBZRLE.encoded_buf = g_malloc0(TARGET_PAGE_SIZE);
        XBZRLE.current_buf = g_malloc(TARGET_PAGE_SIZE);
        acct_clear();
    }

    memory_global_dirty_log_start();
    migration_bitmap_sync();

    qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        qemu_put_byte(f, strlen(block->idstr));
        qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
        qemu_put_be64(f, block->length);
    }

    qemu_mutex_unlock_ramlist();
    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);

    return 0;
}

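/* Iterative stage: keep sending dirty pages until the file's rate limit
 * kicks in, there is nothing left to send, or more than MAX_WAIT ms have
 * been spent in this call.  Returns a negative value on stream errors,
 * 1 when the remaining dirty RAM is expected to fit within
 * migrate_max_downtime() at the measured bandwidth, and 0 otherwise. */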
static int ram_save_iterate(QEMUFile *f, void *opaque)
{
    uint64_t bytes_transferred_last;
    double bwidth = 0;
    int ret;
    int i;
    uint64_t expected_downtime;
    MigrationState *s = migrate_get_current();

    qemu_mutex_lock_ramlist();

    if (ram_list.version != last_version) {
        reset_ram_globals();
    }

    bytes_transferred_last = bytes_transferred;
    bwidth = qemu_get_clock_ns(rt_clock);

    i = 0;
    while ((ret = qemu_file_rate_limit(f)) == 0) {
        int bytes_sent;

        bytes_sent = ram_save_block(f, false);
        /* no more blocks to send */
        if (bytes_sent < 0) {
            break;
        }
        bytes_transferred += bytes_sent;
        acct_info.iterations++;
        /* Check the elapsed time on the first iteration as well, in case
           this is the first pass and we have just had to sync the dirty
           bitmap.  qemu_get_clock_ns() is a bit expensive, so after that we
           only check every 64 iterations. */
        if ((i & 63) == 0) {
            uint64_t t1 = (qemu_get_clock_ns(rt_clock) - bwidth) / 1000000;
            if (t1 > MAX_WAIT) {
                DPRINTF("big wait: %" PRIu64 " milliseconds, %d iterations\n",
                        t1, i);
                break;
            }
        }
        i++;
    }

    if (ret < 0) {
        return ret;
    }

    bwidth = qemu_get_clock_ns(rt_clock) - bwidth;
    bwidth = (bytes_transferred - bytes_transferred_last) / bwidth;

    /* if we haven't transferred anything this round, force
     * expected_downtime to a very high value, but without
     * crashing */
    if (bwidth == 0) {
        bwidth = 0.000001;
    }

    qemu_mutex_unlock_ramlist();
    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);

    expected_downtime = ram_save_remaining() * TARGET_PAGE_SIZE / bwidth;
    DPRINTF("ram_save_iterate: expected(%" PRIu64 ") <= max(%" PRIu64 ")?\n",
            expected_downtime, migrate_max_downtime());

    if (expected_downtime <= migrate_max_downtime()) {
        migration_bitmap_sync();
        expected_downtime = ram_save_remaining() * TARGET_PAGE_SIZE / bwidth;
        s->expected_downtime = expected_downtime / 1000000; /* ns -> ms */

        return expected_downtime <= migrate_max_downtime();
    }
    return 0;
}

static int ram_save_complete(QEMUFile *f, void *opaque)
{
    migration_bitmap_sync();

    qemu_mutex_lock_ramlist();

    /* try transferring iterative blocks of memory */

    /* flush all remaining blocks regardless of rate limiting */
    while (true) {
        int bytes_sent;

        bytes_sent = ram_save_block(f, true);
        /* no more blocks to send */
        if (bytes_sent < 0) {
            break;
        }
        bytes_transferred += bytes_sent;
    }
    migration_end();

    qemu_mutex_unlock_ramlist();
    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);

    return 0;
}

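/* Load one XBZRLE-compressed page into host.  The sub-header written by
 * save_xbzrle_page() is an encoding flag byte followed by a big-endian
 * 16-bit length; the compressed data is then decoded on top of the page's
 * current contents. */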
static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
{
    int ret, rc = 0;
    unsigned int xh_len;
    int xh_flags;

    if (!XBZRLE.decoded_buf) {
        XBZRLE.decoded_buf = g_malloc(TARGET_PAGE_SIZE);
    }

    /* extract RLE header */
    xh_flags = qemu_get_byte(f);
    xh_len = qemu_get_be16(f);

    if (xh_flags != ENCODING_FLAG_XBZRLE) {
        fprintf(stderr, "Failed to load XBZRLE page - wrong compression!\n");
        return -1;
    }

    if (xh_len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "Failed to load XBZRLE page - len overflow!\n");
        return -1;
    }
    /* load data and decode */
    qemu_get_buffer(f, XBZRLE.decoded_buf, xh_len);

    /* decode RLE */
    ret = xbzrle_decode_buffer(XBZRLE.decoded_buf, xh_len, host,
                               TARGET_PAGE_SIZE);
    if (ret == -1) {
        fprintf(stderr, "Failed to load XBZRLE page - decode error!\n");
        rc = -1;
    } else if (ret > TARGET_PAGE_SIZE) {
        fprintf(stderr, "Failed to load XBZRLE page - size %d exceeds %d!\n",
                ret, TARGET_PAGE_SIZE);
        abort();
    }

    return rc;
}

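/* Translate a (block, offset) pair from the incoming stream into a host
 * pointer.  With RAM_SAVE_FLAG_CONTINUE the previously named block is
 * reused; otherwise the block idstr is read from the stream and looked up
 * in the RAM block list.  Returns NULL on a malformed stream or an unknown
 * block. */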
static inline void *host_from_stream_offset(QEMUFile *f,
                                            ram_addr_t offset,
                                            int flags)
{
    static RAMBlock *block = NULL;
    char id[256];
    uint8_t len;

    if (flags & RAM_SAVE_FLAG_CONTINUE) {
        if (!block) {
            fprintf(stderr, "Ack, bad migration stream!\n");
            return NULL;
        }

        return memory_region_get_ram_ptr(block->mr) + offset;
    }

    len = qemu_get_byte(f);
    qemu_get_buffer(f, (uint8_t *)id, len);
    id[len] = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (!strncmp(id, block->idstr, sizeof(id))) {
            return memory_region_get_ram_ptr(block->mr) + offset;
        }
    }

    fprintf(stderr, "Can't find block %s!\n", id);
    return NULL;
}

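/* Incoming side of the RAM stream (version 4 only).  Each record begins
 * with a be64 value whose page-aligned part is an address or size and whose
 * low bits are RAM_SAVE_FLAG_* bits: MEM_SIZE is followed by the RAM block
 * list written by ram_save_setup(), COMPRESS by a single fill byte, PAGE by
 * a raw page, XBZRLE by a compressed page; EOS marks the end of the data
 * written by one save pass. */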
static int ram_load(QEMUFile *f, void *opaque, int version_id)
{
    ram_addr_t addr;
    int flags, ret = 0;
    int error;
    static uint64_t seq_iter;

    seq_iter++;

    if (version_id < 4 || version_id > 4) {
        return -EINVAL;
    }

    do {
        addr = qemu_get_be64(f);

        flags = addr & ~TARGET_PAGE_MASK;
        addr &= TARGET_PAGE_MASK;

        if (flags & RAM_SAVE_FLAG_MEM_SIZE) {
            if (version_id == 4) {
                /* Synchronize RAM block list */
                char id[256];
                ram_addr_t length;
                ram_addr_t total_ram_bytes = addr;

                while (total_ram_bytes) {
                    RAMBlock *block;
                    uint8_t len;

                    len = qemu_get_byte(f);
                    qemu_get_buffer(f, (uint8_t *)id, len);
                    id[len] = 0;
                    length = qemu_get_be64(f);

                    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
                        if (!strncmp(id, block->idstr, sizeof(id))) {
                            if (block->length != length) {
                                ret = -EINVAL;
                                goto done;
                            }
                            break;
                        }
                    }

                    if (!block) {
                        fprintf(stderr, "Unknown ramblock \"%s\", cannot "
                                "accept migration\n", id);
                        ret = -EINVAL;
                        goto done;
                    }

                    total_ram_bytes -= length;
                }
            }
        }

        if (flags & RAM_SAVE_FLAG_COMPRESS) {
            void *host;
            uint8_t ch;

            host = host_from_stream_offset(f, addr, flags);
            if (!host) {
                return -EINVAL;
            }

            ch = qemu_get_byte(f);
            memset(host, ch, TARGET_PAGE_SIZE);
#ifndef _WIN32
            if (ch == 0 &&
                (!kvm_enabled() || kvm_has_sync_mmu()) &&
                getpagesize() <= TARGET_PAGE_SIZE) {
                qemu_madvise(host, TARGET_PAGE_SIZE, QEMU_MADV_DONTNEED);
            }
#endif
        } else if (flags & RAM_SAVE_FLAG_PAGE) {
            void *host;

            host = host_from_stream_offset(f, addr, flags);
            if (!host) {
                return -EINVAL;
            }

            qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
        } else if (flags & RAM_SAVE_FLAG_XBZRLE) {
            if (!migrate_use_xbzrle()) {
                return -EINVAL;
            }
            void *host = host_from_stream_offset(f, addr, flags);
            if (!host) {
                return -EINVAL;
            }

            if (load_xbzrle(f, addr, host) < 0) {
                ret = -EINVAL;
                goto done;
            }
        }
        error = qemu_file_get_error(f);
        if (error) {
            ret = error;
            goto done;
        }
    } while (!(flags & RAM_SAVE_FLAG_EOS));

done:
    DPRINTF("Completed load of VM with exit code %d seq iteration "
            "%" PRIu64 "\n", ret, seq_iter);
    return ret;
}

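/* Callback table handed to the migration core: the three save stages above,
 * the corresponding load routine, and the cancel hook. */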
SaveVMHandlers savevm_ram_handlers = {
    .save_live_setup = ram_save_setup,
    .save_live_iterate = ram_save_iterate,
    .save_live_complete = ram_save_complete,
    .load_state = ram_load,
    .cancel = ram_migration_cancel,
};

#ifdef HAS_AUDIO
struct soundhw {
    const char *name;
    const char *descr;
    int enabled;
    int isa;
    union {
        int (*init_isa) (ISABus *bus);
        int (*init_pci) (PCIBus *bus);
    } init;
};

static struct soundhw soundhw[] = {
#ifdef HAS_AUDIO_CHOICE
#ifdef CONFIG_PCSPK
    {
        "pcspk",
        "PC speaker",
        0,
        1,
        { .init_isa = pcspk_audio_init }
    },
#endif

#ifdef CONFIG_SB16
    {
        "sb16",
        "Creative Sound Blaster 16",
        0,
        1,
        { .init_isa = SB16_init }
    },
#endif

#ifdef CONFIG_CS4231A
    {
        "cs4231a",
        "CS4231A",
        0,
        1,
        { .init_isa = cs4231a_init }
    },
#endif

#ifdef CONFIG_ADLIB
    {
        "adlib",
#ifdef HAS_YMF262
        "Yamaha YMF262 (OPL3)",
#else
        "Yamaha YM3812 (OPL2)",
#endif
        0,
        1,
        { .init_isa = Adlib_init }
    },
#endif

#ifdef CONFIG_GUS
    {
        "gus",
        "Gravis Ultrasound GF1",
        0,
        1,
        { .init_isa = GUS_init }
    },
#endif

#ifdef CONFIG_AC97
    {
        "ac97",
        "Intel 82801AA AC97 Audio",
        0,
        0,
        { .init_pci = ac97_init }
    },
#endif

#ifdef CONFIG_ES1370
    {
        "es1370",
        "ENSONIQ AudioPCI ES1370",
        0,
        0,
        { .init_pci = es1370_init }
    },
#endif

#ifdef CONFIG_HDA
    {
        "hda",
        "Intel HD Audio",
        0,
        0,
        { .init_pci = intel_hda_and_codec_init }
    },
#endif

#endif /* HAS_AUDIO_CHOICE */

    { NULL, NULL, 0, 0, { NULL } }
};

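/* Parse the -soundhw command line option: a comma separated list of card
 * names from the table above, "all" to enable every card, or a help value
 * to print the list of valid names and exit. */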
void select_soundhw(const char *optarg)
{
    struct soundhw *c;

    if (is_help_option(optarg)) {
    show_valid_cards:

#ifdef HAS_AUDIO_CHOICE
        printf("Valid sound card names (comma separated):\n");
        for (c = soundhw; c->name; ++c) {
            printf("%-11s %s\n", c->name, c->descr);
        }
        printf("\n-soundhw all will enable all of the above\n");
#else
        printf("Machine has no user-selectable audio hardware "
               "(it may or may not have always-present audio hardware).\n");
#endif
        exit(!is_help_option(optarg));
    } else {
        size_t l;
        const char *p;
        char *e;
        int bad_card = 0;

        if (!strcmp(optarg, "all")) {
            for (c = soundhw; c->name; ++c) {
                c->enabled = 1;
            }
            return;
        }

        p = optarg;
        while (*p) {
            e = strchr(p, ',');
            l = !e ? strlen(p) : (size_t) (e - p);

            for (c = soundhw; c->name; ++c) {
                if (!strncmp(c->name, p, l) && !c->name[l]) {
                    c->enabled = 1;
                    break;
                }
            }

            if (!c->name) {
                if (l > 80) {
                    fprintf(stderr,
                            "Unknown sound card name (too big to show)\n");
                } else {
                    fprintf(stderr, "Unknown sound card name `%.*s'\n",
                            (int) l, p);
                }
                bad_card = 1;
            }
            p += l + (e != NULL);
        }

        if (bad_card) {
            goto show_valid_cards;
        }
    }
}

void audio_init(ISABus *isa_bus, PCIBus *pci_bus)
{
    struct soundhw *c;

    for (c = soundhw; c->name; ++c) {
        if (c->enabled) {
            if (c->isa) {
                if (isa_bus) {
                    c->init.init_isa(isa_bus);
                }
            } else {
                if (pci_bus) {
                    c->init.init_pci(pci_bus);
                }
            }
        }
    }
}
#else
void select_soundhw(const char *optarg)
{
}
void audio_init(ISABus *isa_bus, PCIBus *pci_bus)
{
}
#endif

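/* Parse a canonical 36-character UUID string (for example
 * "550e8400-e29b-41d4-a716-446655440000") into 16 bytes via UUID_FMT.
 * On x86 targets the parsed UUID is also exposed to the guest through the
 * SMBIOS type 1 table.  Returns 0 on success, -1 on a malformed string. */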
int qemu_uuid_parse(const char *str, uint8_t *uuid)
{
    int ret;

    if (strlen(str) != 36) {
        return -1;
    }

    ret = sscanf(str, UUID_FMT, &uuid[0], &uuid[1], &uuid[2], &uuid[3],
                 &uuid[4], &uuid[5], &uuid[6], &uuid[7], &uuid[8], &uuid[9],
                 &uuid[10], &uuid[11], &uuid[12], &uuid[13], &uuid[14],
                 &uuid[15]);

    if (ret != 16) {
        return -1;
    }
#ifdef TARGET_I386
    smbios_add_field(1, offsetof(struct smbios_type_1, uuid), 16, uuid);
#endif
    return 0;
}

void do_acpitable_option(const char *optarg)
{
#ifdef TARGET_I386
    if (acpi_table_add(optarg) < 0) {
        fprintf(stderr, "Wrong acpi table provided\n");
        exit(1);
    }
#endif
}

void do_smbios_option(const char *optarg)
{
#ifdef TARGET_I386
    if (smbios_entry_add(optarg) < 0) {
        fprintf(stderr, "Wrong smbios provided\n");
        exit(1);
    }
#endif
}

void cpudef_init(void)
{
#if defined(cpudef_setup)
    cpudef_setup(); /* parse cpu definitions in target config file */
#endif
}

int audio_available(void)
{
#ifdef HAS_AUDIO
    return 1;
#else
    return 0;
#endif
}

int tcg_available(void)
{
    return 1;
}

int kvm_available(void)
{
#ifdef CONFIG_KVM
    return 1;
#else
    return 0;
#endif
}

int xen_available(void)
{
#ifdef CONFIG_XEN
    return 1;
#else
    return 0;
#endif
}


TargetInfo *qmp_query_target(Error **errp)
{
    TargetInfo *info = g_malloc0(sizeof(*info));

    info->arch = TARGET_TYPE;

    return info;
}