/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include <stdint.h>
#include <stdarg.h>
#include <stdlib.h>
#ifndef _WIN32
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include "config.h"
#include "monitor/monitor.h"
#include "sysemu.h"
#include "qemu/bitops.h"
#include "qemu/bitmap.h"
#include "arch_init.h"
#include "audio/audio.h"
#include "hw/pc.h"
#include "hw/pci/pci.h"
#include "hw/audiodev.h"
#include "kvm.h"
#include "migration/migration.h"
#include "exec/gdbstub.h"
#include "hw/smbios.h"
#include "exec/address-spaces.h"
#include "hw/pcspk.h"
#include "migration/page_cache.h"
#include "qemu/config-file.h"
#include "qmp-commands.h"
#include "trace.h"

#ifdef DEBUG_ARCH_INIT
#define DPRINTF(fmt, ...) \
    do { fprintf(stdout, "arch_init: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#ifdef TARGET_SPARC
int graphic_width = 1024;
int graphic_height = 768;
int graphic_depth = 8;
#else
int graphic_width = 800;
int graphic_height = 600;
int graphic_depth = 15;
#endif


#if defined(TARGET_ALPHA)
#define QEMU_ARCH QEMU_ARCH_ALPHA
#elif defined(TARGET_ARM)
#define QEMU_ARCH QEMU_ARCH_ARM
#elif defined(TARGET_CRIS)
#define QEMU_ARCH QEMU_ARCH_CRIS
#elif defined(TARGET_I386)
#define QEMU_ARCH QEMU_ARCH_I386
#elif defined(TARGET_M68K)
#define QEMU_ARCH QEMU_ARCH_M68K
#elif defined(TARGET_LM32)
#define QEMU_ARCH QEMU_ARCH_LM32
#elif defined(TARGET_MICROBLAZE)
#define QEMU_ARCH QEMU_ARCH_MICROBLAZE
#elif defined(TARGET_MIPS)
#define QEMU_ARCH QEMU_ARCH_MIPS
#elif defined(TARGET_OPENRISC)
#define QEMU_ARCH QEMU_ARCH_OPENRISC
#elif defined(TARGET_PPC)
#define QEMU_ARCH QEMU_ARCH_PPC
#elif defined(TARGET_S390X)
#define QEMU_ARCH QEMU_ARCH_S390X
#elif defined(TARGET_SH4)
#define QEMU_ARCH QEMU_ARCH_SH4
#elif defined(TARGET_SPARC)
#define QEMU_ARCH QEMU_ARCH_SPARC
#elif defined(TARGET_XTENSA)
#define QEMU_ARCH QEMU_ARCH_XTENSA
#elif defined(TARGET_UNICORE32)
#define QEMU_ARCH QEMU_ARCH_UNICORE32
#endif

const uint32_t arch_type = QEMU_ARCH;

/***********************************************************/
/* ram save/restore */

#define RAM_SAVE_FLAG_FULL     0x01 /* Obsolete, not used anymore */
#define RAM_SAVE_FLAG_COMPRESS 0x02
#define RAM_SAVE_FLAG_MEM_SIZE 0x04
#define RAM_SAVE_FLAG_PAGE     0x08
#define RAM_SAVE_FLAG_EOS      0x10
#define RAM_SAVE_FLAG_CONTINUE 0x20
#define RAM_SAVE_FLAG_XBZRLE   0x40
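/* The flags above travel in the low bits of the 64-bit page address written
 * to the migration stream: RAM addresses are TARGET_PAGE_SIZE aligned, so
 * save_block_hdr() can OR them into the offset and ram_load() recovers them
 * with addr & ~TARGET_PAGE_MASK. */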

#ifdef __ALTIVEC__
#include <altivec.h>
#define VECTYPE        vector unsigned char
#define SPLAT(p)       vec_splat(vec_ld(0, p), 0)
#define ALL_EQ(v1, v2) vec_all_eq(v1, v2)
/* altivec.h may redefine the bool macro as vector type.
 * Reset it to POSIX semantics. */
#undef bool
#define bool _Bool
#elif defined __SSE2__
#include <emmintrin.h>
#define VECTYPE        __m128i
#define SPLAT(p)       _mm_set1_epi8(*(p))
#define ALL_EQ(v1, v2) (_mm_movemask_epi8(_mm_cmpeq_epi8(v1, v2)) == 0xFFFF)
#else
#define VECTYPE        unsigned long
#define SPLAT(p)       (*(p) * (~0UL / 255))
#define ALL_EQ(v1, v2) ((v1) == (v2))
#endif
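/* These helpers back is_dup_page() below: SPLAT() broadcasts the first byte
 * of the page into every lane of VECTYPE and ALL_EQ() compares a whole vector
 * against it, so a page counts as "duplicate" exactly when every byte in it
 * has the same value. */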


static struct defconfig_file {
    const char *filename;
    /* Indicates it is a user config file (disabled by -no-user-config) */
    bool userconfig;
} default_config_files[] = {
    { CONFIG_QEMU_CONFDIR "/qemu.conf",                   true },
    { CONFIG_QEMU_CONFDIR "/target-" TARGET_ARCH ".conf", true },
    { NULL }, /* end of list */
};


int qemu_read_default_config_files(bool userconfig)
{
    int ret;
    struct defconfig_file *f;

    for (f = default_config_files; f->filename; f++) {
        if (!userconfig && f->userconfig) {
            continue;
        }
        ret = qemu_read_config_file(f->filename);
        if (ret < 0 && ret != -ENOENT) {
            return ret;
        }
    }

    return 0;
}

static int is_dup_page(uint8_t *page)
{
    VECTYPE *p = (VECTYPE *)page;
    VECTYPE val = SPLAT(page);
    int i;

    for (i = 0; i < TARGET_PAGE_SIZE / sizeof(VECTYPE); i++) {
        if (!ALL_EQ(val, p[i])) {
            return 0;
        }
    }

    return 1;
}

/* struct containing the XBZRLE cache and a static page
   used for compression */
static struct {
    /* buffer used for XBZRLE encoding */
    uint8_t *encoded_buf;
    /* buffer for storing page content */
    uint8_t *current_buf;
    /* buffer used for XBZRLE decoding */
    uint8_t *decoded_buf;
    /* Cache for XBZRLE */
    PageCache *cache;
} XBZRLE = {
    .encoded_buf = NULL,
    .current_buf = NULL,
    .decoded_buf = NULL,
    .cache = NULL,
};


int64_t xbzrle_cache_resize(int64_t new_size)
{
    if (XBZRLE.cache != NULL) {
        return cache_resize(XBZRLE.cache, new_size / TARGET_PAGE_SIZE) *
            TARGET_PAGE_SIZE;
    }
    return pow2floor(new_size);
}

/* accounting for migration statistics */
typedef struct AccountingInfo {
    uint64_t dup_pages;
    uint64_t norm_pages;
    uint64_t iterations;
    uint64_t xbzrle_bytes;
    uint64_t xbzrle_pages;
    uint64_t xbzrle_cache_miss;
    uint64_t xbzrle_overflows;
} AccountingInfo;

static AccountingInfo acct_info;

static void acct_clear(void)
{
    memset(&acct_info, 0, sizeof(acct_info));
}

uint64_t dup_mig_bytes_transferred(void)
{
    return acct_info.dup_pages * TARGET_PAGE_SIZE;
}

uint64_t dup_mig_pages_transferred(void)
{
    return acct_info.dup_pages;
}

uint64_t norm_mig_bytes_transferred(void)
{
    return acct_info.norm_pages * TARGET_PAGE_SIZE;
}

uint64_t norm_mig_pages_transferred(void)
{
    return acct_info.norm_pages;
}

uint64_t xbzrle_mig_bytes_transferred(void)
{
    return acct_info.xbzrle_bytes;
}

uint64_t xbzrle_mig_pages_transferred(void)
{
    return acct_info.xbzrle_pages;
}

uint64_t xbzrle_mig_pages_cache_miss(void)
{
    return acct_info.xbzrle_cache_miss;
}

uint64_t xbzrle_mig_pages_overflow(void)
{
    return acct_info.xbzrle_overflows;
}

static void save_block_hdr(QEMUFile *f, RAMBlock *block, ram_addr_t offset,
                           int cont, int flag)
{
    qemu_put_be64(f, offset | cont | flag);
    if (!cont) {
        qemu_put_byte(f, strlen(block->idstr));
        qemu_put_buffer(f, (uint8_t *)block->idstr,
                        strlen(block->idstr));
    }
}
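
/* Each page record starts with the header written above: a 64-bit big-endian
 * word holding the page offset within its RAMBlock plus the RAM_SAVE_FLAG_*
 * bits, optionally followed by the block's idstr (one length byte plus the
 * string) when RAM_SAVE_FLAG_CONTINUE is not set.  The payload that follows
 * depends on the flag: a single byte for COMPRESS, a full TARGET_PAGE_SIZE
 * buffer for PAGE, or an XBZRLE record (see below). */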

#define ENCODING_FLAG_XBZRLE 0x1

static int save_xbzrle_page(QEMUFile *f, uint8_t *current_data,
                            ram_addr_t current_addr, RAMBlock *block,
                            ram_addr_t offset, int cont, bool last_stage)
{
    int encoded_len = 0, bytes_sent = -1;
    uint8_t *prev_cached_page;

    if (!cache_is_cached(XBZRLE.cache, current_addr)) {
        if (!last_stage) {
            cache_insert(XBZRLE.cache, current_addr,
                         g_memdup(current_data, TARGET_PAGE_SIZE));
        }
        acct_info.xbzrle_cache_miss++;
        return -1;
    }

    prev_cached_page = get_cached_data(XBZRLE.cache, current_addr);

    /* copy the current page into our scratch buffer before encoding */
    memcpy(XBZRLE.current_buf, current_data, TARGET_PAGE_SIZE);

    /* XBZRLE encoding (if there is no overflow) */
    encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
                                       TARGET_PAGE_SIZE, XBZRLE.encoded_buf,
                                       TARGET_PAGE_SIZE);
    if (encoded_len == 0) {
        DPRINTF("Skipping unmodified page\n");
        return 0;
    } else if (encoded_len == -1) {
        DPRINTF("Overflow\n");
        acct_info.xbzrle_overflows++;
        /* update data in the cache */
        memcpy(prev_cached_page, current_data, TARGET_PAGE_SIZE);
        return -1;
    }

    /* update the cache copy so future encodings diff against the page the
     * destination now holds */
    if (!last_stage) {
        memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
    }

    /* Send XBZRLE based compressed page */
    save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_XBZRLE);
    qemu_put_byte(f, ENCODING_FLAG_XBZRLE);
    qemu_put_be16(f, encoded_len);
    qemu_put_buffer(f, XBZRLE.encoded_buf, encoded_len);
    bytes_sent = encoded_len + 1 + 2; /* encoded data + flag byte + be16 len */
    acct_info.xbzrle_pages++;
    acct_info.xbzrle_bytes += bytes_sent;

    return bytes_sent;
}
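
/* State of the page walk: ram_save_block() resumes scanning from the block
 * and offset it stopped at on the previous call, cycling round-robin over
 * ram_list until it wraps back to where it started. */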
static RAMBlock *last_block;
static ram_addr_t last_offset;
static unsigned long *migration_bitmap;
static uint64_t migration_dirty_pages;

static inline bool migration_bitmap_test_and_reset_dirty(MemoryRegion *mr,
                                                         ram_addr_t offset)
{
    bool ret;
    int nr = (mr->ram_addr + offset) >> TARGET_PAGE_BITS;

    ret = test_and_clear_bit(nr, migration_bitmap);

    if (ret) {
        migration_dirty_pages--;
    }
    return ret;
}

static inline bool migration_bitmap_set_dirty(MemoryRegion *mr,
                                              ram_addr_t offset)
{
    bool ret;
    int nr = (mr->ram_addr + offset) >> TARGET_PAGE_BITS;

    ret = test_and_set_bit(nr, migration_bitmap);

    if (!ret) {
        migration_dirty_pages++;
    }
    return ret;
}
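
/* Fold the global dirty log into migration_bitmap: every page the guest has
 * touched since the last sync gets its bit set here (bumping
 * migration_dirty_pages, which is what ram_save_remaining() reports), and the
 * MIGRATION dirty bits are then cleared so the next sync only sees new
 * writes.  The dirty page rate is recomputed once at least a second of wall
 * clock time has passed. */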
static void migration_bitmap_sync(void)
{
    RAMBlock *block;
    ram_addr_t addr;
    uint64_t num_dirty_pages_init = migration_dirty_pages;
    MigrationState *s = migrate_get_current();
    static int64_t start_time;
    static int64_t num_dirty_pages_period;
    int64_t end_time;

    if (!start_time) {
        start_time = qemu_get_clock_ms(rt_clock);
    }

    trace_migration_bitmap_sync_start();
    memory_global_sync_dirty_bitmap(get_system_memory());

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        for (addr = 0; addr < block->length; addr += TARGET_PAGE_SIZE) {
            if (memory_region_get_dirty(block->mr, addr, TARGET_PAGE_SIZE,
                                        DIRTY_MEMORY_MIGRATION)) {
                migration_bitmap_set_dirty(block->mr, addr);
            }
        }
        memory_region_reset_dirty(block->mr, 0, block->length,
                                  DIRTY_MEMORY_MIGRATION);
    }
    trace_migration_bitmap_sync_end(migration_dirty_pages
                                    - num_dirty_pages_init);
    num_dirty_pages_period += migration_dirty_pages - num_dirty_pages_init;
    end_time = qemu_get_clock_ms(rt_clock);

    /* more than 1 second = 1000 milliseconds */
    if (end_time > start_time + 1000) {
        s->dirty_pages_rate = num_dirty_pages_period * 1000
            / (end_time - start_time);
        start_time = end_time;
        num_dirty_pages_period = 0;
    }
}


/*
 * ram_save_block: Writes a page of memory to the stream f
 *
 * Returns:  0: if the page hasn't changed
 *          -1: if there are no more dirty pages
 *           n: the number of bytes written otherwise
 */

static int ram_save_block(QEMUFile *f, bool last_stage)
{
    RAMBlock *block = last_block;
    ram_addr_t offset = last_offset;
    int bytes_sent = -1;
    MemoryRegion *mr;
    ram_addr_t current_addr;

    if (!block) {
        block = QLIST_FIRST(&ram_list.blocks);
    }

    do {
        mr = block->mr;
        if (migration_bitmap_test_and_reset_dirty(mr, offset)) {
            uint8_t *p;
            int cont = (block == last_block) ? RAM_SAVE_FLAG_CONTINUE : 0;

            p = memory_region_get_ram_ptr(mr) + offset;

            if (is_dup_page(p)) {
                acct_info.dup_pages++;
                save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_COMPRESS);
                qemu_put_byte(f, *p);
                bytes_sent = 1;
            } else if (migrate_use_xbzrle()) {
                current_addr = block->offset + offset;
                bytes_sent = save_xbzrle_page(f, p, current_addr, block,
                                              offset, cont, last_stage);
                if (!last_stage) {
                    p = get_cached_data(XBZRLE.cache, current_addr);
                }
            }

            /* XBZRLE was disabled or could not encode the page (cache miss
             * or overflow): send the full page */
            if (bytes_sent == -1) {
                save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_PAGE);
                qemu_put_buffer(f, p, TARGET_PAGE_SIZE);
                bytes_sent = TARGET_PAGE_SIZE;
                acct_info.norm_pages++;
            }

            /* if page is unmodified, continue to the next */
            if (bytes_sent != 0) {
                break;
            }
        }

        offset += TARGET_PAGE_SIZE;
        if (offset >= block->length) {
            offset = 0;
            block = QLIST_NEXT(block, next);
            if (!block) {
                block = QLIST_FIRST(&ram_list.blocks);
            }
        }
    } while (block != last_block || offset != last_offset);

    last_block = block;
    last_offset = offset;

    return bytes_sent;
}

static uint64_t bytes_transferred;

static ram_addr_t ram_save_remaining(void)
{
    return migration_dirty_pages;
}

uint64_t ram_bytes_remaining(void)
{
    return ram_save_remaining() * TARGET_PAGE_SIZE;
}

uint64_t ram_bytes_transferred(void)
{
    return bytes_transferred;
}

uint64_t ram_bytes_total(void)
{
    RAMBlock *block;
    uint64_t total = 0;

    QLIST_FOREACH(block, &ram_list.blocks, next)
        total += block->length;

    return total;
}

static int block_compar(const void *a, const void *b)
{
    RAMBlock * const *ablock = a;
    RAMBlock * const *bblock = b;

    return strcmp((*ablock)->idstr, (*bblock)->idstr);
}

static void sort_ram_list(void)
{
    RAMBlock *block, *nblock, **blocks;
    int n;
    n = 0;
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        ++n;
    }
    blocks = g_malloc(n * sizeof *blocks);
    n = 0;
    QLIST_FOREACH_SAFE(block, &ram_list.blocks, next, nblock) {
        blocks[n++] = block;
        QLIST_REMOVE(block, next);
    }
    qsort(blocks, n, sizeof *blocks, block_compar);
    while (--n >= 0) {
        QLIST_INSERT_HEAD(&ram_list.blocks, blocks[n], next);
    }
    g_free(blocks);
}

static void migration_end(void)
{
    memory_global_dirty_log_stop();

    if (migrate_use_xbzrle()) {
        cache_fini(XBZRLE.cache);
        g_free(XBZRLE.cache);
        g_free(XBZRLE.encoded_buf);
        g_free(XBZRLE.current_buf);
        g_free(XBZRLE.decoded_buf);
        XBZRLE.cache = NULL;
    }
}

static void ram_migration_cancel(void *opaque)
{
    migration_end();
}


static void reset_ram_globals(void)
{
    last_block = NULL;
    last_offset = 0;
    sort_ram_list();
}

#define MAX_WAIT 50 /* ms, half buffered_file limit */
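
/* Setup stage: mark every page dirty, start dirty logging and describe the
 * RAM layout on the wire: one 64-bit word with the total RAM size (tagged
 * with RAM_SAVE_FLAG_MEM_SIZE), then for each RAMBlock its idstr (length
 * byte plus string) and length, terminated by RAM_SAVE_FLAG_EOS. */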
static int ram_save_setup(QEMUFile *f, void *opaque)
{
    RAMBlock *block;
    int64_t ram_pages = last_ram_offset() >> TARGET_PAGE_BITS;

    migration_bitmap = bitmap_new(ram_pages);
    bitmap_set(migration_bitmap, 0, ram_pages);
    migration_dirty_pages = ram_pages;

    bytes_transferred = 0;
    reset_ram_globals();

    if (migrate_use_xbzrle()) {
        XBZRLE.cache = cache_init(migrate_xbzrle_cache_size() /
                                  TARGET_PAGE_SIZE,
                                  TARGET_PAGE_SIZE);
        if (!XBZRLE.cache) {
            DPRINTF("Error creating cache\n");
            return -1;
        }
        XBZRLE.encoded_buf = g_malloc0(TARGET_PAGE_SIZE);
        XBZRLE.current_buf = g_malloc(TARGET_PAGE_SIZE);
        acct_clear();
    }

    memory_global_dirty_log_start();
    migration_bitmap_sync();

    qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        qemu_put_byte(f, strlen(block->idstr));
        qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
        qemu_put_be64(f, block->length);
    }

    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);

    return 0;
}
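
/* Iteration stage: keep sending dirty pages until the rate limit kicks in,
 * there is nothing left to send, or more than MAX_WAIT ms have been spent in
 * this call.  Returns a negative value on stream errors, 1 once the
 * estimated time to transfer the remaining dirty pages (re-checked after a
 * bitmap sync) fits within migrate_max_downtime(), and 0 otherwise. */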
static int ram_save_iterate(QEMUFile *f, void *opaque)
{
    uint64_t bytes_transferred_last;
    double bwidth = 0;
    int ret;
    int i;
    uint64_t expected_downtime;
    MigrationState *s = migrate_get_current();

    bytes_transferred_last = bytes_transferred;
    bwidth = qemu_get_clock_ns(rt_clock);

    i = 0;
    while ((ret = qemu_file_rate_limit(f)) == 0) {
        int bytes_sent;

        bytes_sent = ram_save_block(f, false);
        /* no more blocks to send */
        if (bytes_sent < 0) {
            break;
        }
        bytes_transferred += bytes_sent;
        acct_info.iterations++;
        /* we want to check in the 1st loop, just in case it was the 1st time
           and we had to sync the dirty bitmap.
           qemu_get_clock_ns() is a bit expensive, so we only check every few
           iterations
        */
        if ((i & 63) == 0) {
            uint64_t t1 = (qemu_get_clock_ns(rt_clock) - bwidth) / 1000000;
            if (t1 > MAX_WAIT) {
                DPRINTF("big wait: %" PRIu64 " milliseconds, %d iterations\n",
                        t1, i);
                break;
            }
        }
        i++;
    }

    if (ret < 0) {
        return ret;
    }

    bwidth = qemu_get_clock_ns(rt_clock) - bwidth;
    bwidth = (bytes_transferred - bytes_transferred_last) / bwidth;

    /* if we haven't transferred anything this round, force
     * expected_downtime to a very high value, but without
     * crashing */
    if (bwidth == 0) {
        bwidth = 0.000001;
    }

    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);

    expected_downtime = ram_save_remaining() * TARGET_PAGE_SIZE / bwidth;
    DPRINTF("ram_save_live: expected(%" PRIu64 ") <= max(%" PRIu64 ")?\n",
            expected_downtime, migrate_max_downtime());

    if (expected_downtime <= migrate_max_downtime()) {
        migration_bitmap_sync();
        expected_downtime = ram_save_remaining() * TARGET_PAGE_SIZE / bwidth;
        s->expected_downtime = expected_downtime / 1000000; /* ns -> ms */

        return expected_downtime <= migrate_max_downtime();
    }
    return 0;
}

static int ram_save_complete(QEMUFile *f, void *opaque)
{
    migration_bitmap_sync();

    /* try transferring iterative blocks of memory */

    /* flush all remaining blocks regardless of rate limiting */
    while (true) {
        int bytes_sent;

        bytes_sent = ram_save_block(f, true);
        /* no more blocks to send */
        if (bytes_sent < 0) {
            break;
        }
        bytes_transferred += bytes_sent;
    }
    memory_global_dirty_log_stop();

    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);

    g_free(migration_bitmap);
    migration_bitmap = NULL;

    return 0;
}

static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
{
    int ret, rc = 0;
    unsigned int xh_len;
    int xh_flags;

    if (!XBZRLE.decoded_buf) {
        XBZRLE.decoded_buf = g_malloc(TARGET_PAGE_SIZE);
    }

    /* extract RLE header */
    xh_flags = qemu_get_byte(f);
    xh_len = qemu_get_be16(f);

    if (xh_flags != ENCODING_FLAG_XBZRLE) {
        fprintf(stderr, "Failed to load XBZRLE page - wrong compression!\n");
        return -1;
    }

    if (xh_len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "Failed to load XBZRLE page - len overflow!\n");
        return -1;
    }
    /* load data and decode */
    qemu_get_buffer(f, XBZRLE.decoded_buf, xh_len);

    /* decode RLE */
    ret = xbzrle_decode_buffer(XBZRLE.decoded_buf, xh_len, host,
                               TARGET_PAGE_SIZE);
    if (ret == -1) {
        fprintf(stderr, "Failed to load XBZRLE page - decode error!\n");
        rc = -1;
    } else if (ret > TARGET_PAGE_SIZE) {
        fprintf(stderr, "Failed to load XBZRLE page - size %d exceeds %d!\n",
                ret, TARGET_PAGE_SIZE);
        abort();
    }

    return rc;
}
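
/* Map a (block, offset) pair from the stream to a host pointer.  With
 * RAM_SAVE_FLAG_CONTINUE the page belongs to the same RAMBlock as the
 * previous one; otherwise the block's idstr follows in the stream and is
 * looked up in ram_list.  Returns NULL if the stream is malformed or the
 * block is unknown. */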
static inline void *host_from_stream_offset(QEMUFile *f,
                                            ram_addr_t offset,
                                            int flags)
{
    static RAMBlock *block = NULL;
    char id[256];
    uint8_t len;

    if (flags & RAM_SAVE_FLAG_CONTINUE) {
        if (!block) {
            fprintf(stderr, "Ack, bad migration stream!\n");
            return NULL;
        }

        return memory_region_get_ram_ptr(block->mr) + offset;
    }

    len = qemu_get_byte(f);
    qemu_get_buffer(f, (uint8_t *)id, len);
    id[len] = 0;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (!strncmp(id, block->idstr, sizeof(id))) {
            return memory_region_get_ram_ptr(block->mr) + offset;
        }
    }

    fprintf(stderr, "Can't find block %s!\n", id);
    return NULL;
}

static int ram_load(QEMUFile *f, void *opaque, int version_id)
{
    ram_addr_t addr;
    int flags, ret = 0;
    int error;
    static uint64_t seq_iter;

    seq_iter++;

    if (version_id != 4) {
        return -EINVAL;
    }

    do {
        addr = qemu_get_be64(f);

        flags = addr & ~TARGET_PAGE_MASK;
        addr &= TARGET_PAGE_MASK;

        if (flags & RAM_SAVE_FLAG_MEM_SIZE) {
            if (version_id == 4) {
                /* Synchronize RAM block list */
                char id[256];
                ram_addr_t length;
                ram_addr_t total_ram_bytes = addr;

                while (total_ram_bytes) {
                    RAMBlock *block;
                    uint8_t len;

                    len = qemu_get_byte(f);
                    qemu_get_buffer(f, (uint8_t *)id, len);
                    id[len] = 0;
                    length = qemu_get_be64(f);

                    QLIST_FOREACH(block, &ram_list.blocks, next) {
                        if (!strncmp(id, block->idstr, sizeof(id))) {
                            if (block->length != length) {
                                ret = -EINVAL;
                                goto done;
                            }
                            break;
                        }
                    }

                    if (!block) {
                        fprintf(stderr, "Unknown ramblock \"%s\", cannot "
                                "accept migration\n", id);
                        ret = -EINVAL;
                        goto done;
                    }

                    total_ram_bytes -= length;
                }
            }
        }

        if (flags & RAM_SAVE_FLAG_COMPRESS) {
            void *host;
            uint8_t ch;

            host = host_from_stream_offset(f, addr, flags);
            if (!host) {
                return -EINVAL;
            }

            ch = qemu_get_byte(f);
            memset(host, ch, TARGET_PAGE_SIZE);
#ifndef _WIN32
            if (ch == 0 &&
                (!kvm_enabled() || kvm_has_sync_mmu()) &&
                getpagesize() <= TARGET_PAGE_SIZE) {
                qemu_madvise(host, TARGET_PAGE_SIZE, QEMU_MADV_DONTNEED);
            }
#endif
        } else if (flags & RAM_SAVE_FLAG_PAGE) {
            void *host;

            host = host_from_stream_offset(f, addr, flags);
            if (!host) {
                return -EINVAL;
            }

            qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
        } else if (flags & RAM_SAVE_FLAG_XBZRLE) {
            if (!migrate_use_xbzrle()) {
                return -EINVAL;
            }
            void *host = host_from_stream_offset(f, addr, flags);
            if (!host) {
                return -EINVAL;
            }

            if (load_xbzrle(f, addr, host) < 0) {
                ret = -EINVAL;
                goto done;
            }
        }
        error = qemu_file_get_error(f);
        if (error) {
            ret = error;
            goto done;
        }
    } while (!(flags & RAM_SAVE_FLAG_EOS));

done:
    DPRINTF("Completed load of VM with exit code %d seq iteration "
            "%" PRIu64 "\n", ret, seq_iter);
    return ret;
}

SaveVMHandlers savevm_ram_handlers = {
    .save_live_setup = ram_save_setup,
    .save_live_iterate = ram_save_iterate,
    .save_live_complete = ram_save_complete,
    .load_state = ram_load,
    .cancel = ram_migration_cancel,
};

#ifdef HAS_AUDIO
struct soundhw {
    const char *name;
    const char *descr;
    int enabled;
    int isa;
    union {
        int (*init_isa) (ISABus *bus);
        int (*init_pci) (PCIBus *bus);
    } init;
};

static struct soundhw soundhw[] = {
#ifdef HAS_AUDIO_CHOICE
#ifdef CONFIG_PCSPK
    {
        "pcspk",
        "PC speaker",
        0,
        1,
        { .init_isa = pcspk_audio_init }
    },
#endif

#ifdef CONFIG_SB16
    {
        "sb16",
        "Creative Sound Blaster 16",
        0,
        1,
        { .init_isa = SB16_init }
    },
#endif

#ifdef CONFIG_CS4231A
    {
        "cs4231a",
        "CS4231A",
        0,
        1,
        { .init_isa = cs4231a_init }
    },
#endif

#ifdef CONFIG_ADLIB
    {
        "adlib",
#ifdef HAS_YMF262
        "Yamaha YMF262 (OPL3)",
#else
        "Yamaha YM3812 (OPL2)",
#endif
        0,
        1,
        { .init_isa = Adlib_init }
    },
#endif

#ifdef CONFIG_GUS
    {
        "gus",
        "Gravis Ultrasound GF1",
        0,
        1,
        { .init_isa = GUS_init }
    },
#endif

#ifdef CONFIG_AC97
    {
        "ac97",
        "Intel 82801AA AC97 Audio",
        0,
        0,
        { .init_pci = ac97_init }
    },
#endif

#ifdef CONFIG_ES1370
    {
        "es1370",
        "ENSONIQ AudioPCI ES1370",
        0,
        0,
        { .init_pci = es1370_init }
    },
#endif

#ifdef CONFIG_HDA
    {
        "hda",
        "Intel HD Audio",
        0,
        0,
        { .init_pci = intel_hda_and_codec_init }
    },
#endif

#endif /* HAS_AUDIO_CHOICE */

    { NULL, NULL, 0, 0, { NULL } }
};
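
/* Parse the -soundhw option: a comma separated list of card names from the
 * table above (e.g. "-soundhw sb16,adlib"), "all" to enable every card, or a
 * help value to print the list of valid names. */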
void select_soundhw(const char *optarg)
{
    struct soundhw *c;

    if (is_help_option(optarg)) {
    show_valid_cards:

#ifdef HAS_AUDIO_CHOICE
        printf("Valid sound card names (comma separated):\n");
        for (c = soundhw; c->name; ++c) {
            printf("%-11s %s\n", c->name, c->descr);
        }
        printf("\n-soundhw all will enable all of the above\n");
#else
        printf("Machine has no user-selectable audio hardware "
               "(it may or may not have always-present audio hardware).\n");
#endif
        exit(!is_help_option(optarg));
    } else {
        size_t l;
        const char *p;
        char *e;
        int bad_card = 0;

        if (!strcmp(optarg, "all")) {
            for (c = soundhw; c->name; ++c) {
                c->enabled = 1;
            }
            return;
        }

        p = optarg;
        while (*p) {
            e = strchr(p, ',');
            l = !e ? strlen(p) : (size_t) (e - p);

            for (c = soundhw; c->name; ++c) {
                if (!strncmp(c->name, p, l) && !c->name[l]) {
                    c->enabled = 1;
                    break;
                }
            }

            if (!c->name) {
                if (l > 80) {
                    fprintf(stderr,
                            "Unknown sound card name (too big to show)\n");
                } else {
                    fprintf(stderr, "Unknown sound card name `%.*s'\n",
                            (int) l, p);
                }
                bad_card = 1;
            }
            p += l + (e != NULL);
        }

        if (bad_card) {
            goto show_valid_cards;
        }
    }
}

void audio_init(ISABus *isa_bus, PCIBus *pci_bus)
{
    struct soundhw *c;

    for (c = soundhw; c->name; ++c) {
        if (c->enabled) {
            if (c->isa) {
                if (isa_bus) {
                    c->init.init_isa(isa_bus);
                }
            } else {
                if (pci_bus) {
                    c->init.init_pci(pci_bus);
                }
            }
        }
    }
}
#else
void select_soundhw(const char *optarg)
{
}
void audio_init(ISABus *isa_bus, PCIBus *pci_bus)
{
}
#endif

int qemu_uuid_parse(const char *str, uint8_t *uuid)
{
    int ret;

    if (strlen(str) != 36) {
        return -1;
    }

    ret = sscanf(str, UUID_FMT, &uuid[0], &uuid[1], &uuid[2], &uuid[3],
                 &uuid[4], &uuid[5], &uuid[6], &uuid[7], &uuid[8], &uuid[9],
                 &uuid[10], &uuid[11], &uuid[12], &uuid[13], &uuid[14],
                 &uuid[15]);

    if (ret != 16) {
        return -1;
    }
#ifdef TARGET_I386
    smbios_add_field(1, offsetof(struct smbios_type_1, uuid), 16, uuid);
#endif
    return 0;
}

void do_acpitable_option(const char *optarg)
{
#ifdef TARGET_I386
    if (acpi_table_add(optarg) < 0) {
        fprintf(stderr, "Wrong acpi table provided\n");
        exit(1);
    }
#endif
}

void do_smbios_option(const char *optarg)
{
#ifdef TARGET_I386
    if (smbios_entry_add(optarg) < 0) {
        fprintf(stderr, "Wrong smbios provided\n");
        exit(1);
    }
#endif
}

void cpudef_init(void)
{
#if defined(cpudef_setup)
    cpudef_setup(); /* parse cpu definitions in target config file */
#endif
}

int audio_available(void)
{
#ifdef HAS_AUDIO
    return 1;
#else
    return 0;
#endif
}

int tcg_available(void)
{
    return 1;
}

int kvm_available(void)
{
#ifdef CONFIG_KVM
    return 1;
#else
    return 0;
#endif
}

int xen_available(void)
{
#ifdef CONFIG_XEN
    return 1;
#else
    return 0;
#endif
}


TargetInfo *qmp_query_target(Error **errp)
{
    TargetInfo *info = g_malloc0(sizeof(*info));

    info->arch = TARGET_TYPE;

    return info;
}