/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include <stdint.h>
#include <stdarg.h>
#include <stdlib.h>
#ifndef _WIN32
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include "config.h"
#include "monitor/monitor.h"
#include "sysemu/sysemu.h"
#include "qemu/bitops.h"
#include "qemu/bitmap.h"
#include "sysemu/arch_init.h"
#include "audio/audio.h"
#include "hw/i386/pc.h"
#include "hw/pci/pci.h"
#include "hw/audio/audio.h"
#include "sysemu/kvm.h"
#include "migration/migration.h"
#include "hw/i386/smbios.h"
#include "exec/address-spaces.h"
#include "hw/audio/pcspk.h"
#include "migration/page_cache.h"
#include "qemu/config-file.h"
#include "qmp-commands.h"
#include "trace.h"
#include "exec/cpu-all.h"
#include "hw/acpi/acpi.h"

#ifdef DEBUG_ARCH_INIT
#define DPRINTF(fmt, ...) \
    do { fprintf(stdout, "arch_init: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#ifdef TARGET_SPARC
int graphic_width = 1024;
int graphic_height = 768;
int graphic_depth = 8;
#else
int graphic_width = 800;
int graphic_height = 600;
int graphic_depth = 15;
#endif


#if defined(TARGET_ALPHA)
#define QEMU_ARCH QEMU_ARCH_ALPHA
#elif defined(TARGET_ARM)
#define QEMU_ARCH QEMU_ARCH_ARM
#elif defined(TARGET_CRIS)
#define QEMU_ARCH QEMU_ARCH_CRIS
#elif defined(TARGET_I386)
#define QEMU_ARCH QEMU_ARCH_I386
#elif defined(TARGET_M68K)
#define QEMU_ARCH QEMU_ARCH_M68K
#elif defined(TARGET_LM32)
#define QEMU_ARCH QEMU_ARCH_LM32
#elif defined(TARGET_MICROBLAZE)
#define QEMU_ARCH QEMU_ARCH_MICROBLAZE
#elif defined(TARGET_MIPS)
#define QEMU_ARCH QEMU_ARCH_MIPS
#elif defined(TARGET_MOXIE)
#define QEMU_ARCH QEMU_ARCH_MOXIE
#elif defined(TARGET_OPENRISC)
#define QEMU_ARCH QEMU_ARCH_OPENRISC
#elif defined(TARGET_PPC)
#define QEMU_ARCH QEMU_ARCH_PPC
#elif defined(TARGET_S390X)
#define QEMU_ARCH QEMU_ARCH_S390X
#elif defined(TARGET_SH4)
#define QEMU_ARCH QEMU_ARCH_SH4
#elif defined(TARGET_SPARC)
#define QEMU_ARCH QEMU_ARCH_SPARC
#elif defined(TARGET_XTENSA)
#define QEMU_ARCH QEMU_ARCH_XTENSA
#elif defined(TARGET_UNICORE32)
#define QEMU_ARCH QEMU_ARCH_UNICORE32
#endif

const uint32_t arch_type = QEMU_ARCH;

/***********************************************************/
/* ram save/restore */

#define RAM_SAVE_FLAG_FULL     0x01 /* Obsolete, not used anymore */
#define RAM_SAVE_FLAG_COMPRESS 0x02
#define RAM_SAVE_FLAG_MEM_SIZE 0x04
#define RAM_SAVE_FLAG_PAGE     0x08
#define RAM_SAVE_FLAG_EOS      0x10
#define RAM_SAVE_FLAG_CONTINUE 0x20
#define RAM_SAVE_FLAG_XBZRLE   0x40


static struct defconfig_file {
    const char *filename;
    /* Indicates it is a user config file (disabled by -no-user-config) */
    bool userconfig;
} default_config_files[] = {
    { CONFIG_QEMU_CONFDIR "/qemu.conf",                   true },
    { CONFIG_QEMU_CONFDIR "/target-" TARGET_ARCH ".conf", true },
    { NULL }, /* end of list */
};


int qemu_read_default_config_files(bool userconfig)
{
    int ret;
    struct defconfig_file *f;

    for (f = default_config_files; f->filename; f++) {
        if (!userconfig && f->userconfig) {
            continue;
        }
        ret = qemu_read_config_file(f->filename);
        if (ret < 0 && ret != -ENOENT) {
            return ret;
        }
    }

    return 0;
}

static inline bool is_zero_page(uint8_t *p)
{
    return buffer_find_nonzero_offset(p, TARGET_PAGE_SIZE) ==
        TARGET_PAGE_SIZE;
}

/* This struct contains the XBZRLE cache and the static buffers
   used by the compression */
static struct {
    /* buffer used for XBZRLE encoding */
    uint8_t *encoded_buf;
    /* buffer for storing page content */
    uint8_t *current_buf;
    /* buffer used for XBZRLE decoding */
    uint8_t *decoded_buf;
    /* Cache for XBZRLE */
    PageCache *cache;
} XBZRLE = {
    .encoded_buf = NULL,
    .current_buf = NULL,
    .decoded_buf = NULL,
    .cache = NULL,
};
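
/*
 * Resize the XBZRLE page cache.  cache_resize() works in whole pages, so
 * the requested size is rounded down to a multiple of TARGET_PAGE_SIZE and
 * the effective size in bytes is returned.  If the cache has not been
 * allocated yet, the power-of-two floor of the requested size is returned,
 * which is presumably what the cache would end up using anyway.
 */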
int64_t xbzrle_cache_resize(int64_t new_size)
{
    if (XBZRLE.cache != NULL) {
        return cache_resize(XBZRLE.cache, new_size / TARGET_PAGE_SIZE) *
            TARGET_PAGE_SIZE;
    }
    return pow2floor(new_size);
}

/* accounting for migration statistics */
typedef struct AccountingInfo {
    uint64_t dup_pages;
    uint64_t skipped_pages;
    uint64_t norm_pages;
    uint64_t iterations;
    uint64_t xbzrle_bytes;
    uint64_t xbzrle_pages;
    uint64_t xbzrle_cache_miss;
    uint64_t xbzrle_overflows;
} AccountingInfo;

static AccountingInfo acct_info;

static void acct_clear(void)
{
    memset(&acct_info, 0, sizeof(acct_info));
}

uint64_t dup_mig_bytes_transferred(void)
{
    return acct_info.dup_pages * TARGET_PAGE_SIZE;
}

uint64_t dup_mig_pages_transferred(void)
{
    return acct_info.dup_pages;
}

uint64_t skipped_mig_bytes_transferred(void)
{
    return acct_info.skipped_pages * TARGET_PAGE_SIZE;
}

uint64_t skipped_mig_pages_transferred(void)
{
    return acct_info.skipped_pages;
}

uint64_t norm_mig_bytes_transferred(void)
{
    return acct_info.norm_pages * TARGET_PAGE_SIZE;
}

uint64_t norm_mig_pages_transferred(void)
{
    return acct_info.norm_pages;
}

uint64_t xbzrle_mig_bytes_transferred(void)
{
    return acct_info.xbzrle_bytes;
}

uint64_t xbzrle_mig_pages_transferred(void)
{
    return acct_info.xbzrle_pages;
}

uint64_t xbzrle_mig_pages_cache_miss(void)
{
    return acct_info.xbzrle_cache_miss;
}

uint64_t xbzrle_mig_pages_overflow(void)
{
    return acct_info.xbzrle_overflows;
}
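
/*
 * save_block_hdr: write the header that precedes every page on the wire.
 *
 * The header is a single be64 word holding the page offset within its
 * RAMBlock, with the RAM_SAVE_FLAG_* bits OR-ed into the low bits (page
 * offsets are page aligned, so those bits are otherwise zero).  Unless
 * RAM_SAVE_FLAG_CONTINUE is set, the block idstr follows as a one-byte
 * length plus the string itself.  Returns the number of bytes written.
 */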
static size_t save_block_hdr(QEMUFile *f, RAMBlock *block, ram_addr_t offset,
                             int cont, int flag)
{
    size_t size;

    qemu_put_be64(f, offset | cont | flag);
    size = 8;

    if (!cont) {
        qemu_put_byte(f, strlen(block->idstr));
        qemu_put_buffer(f, (uint8_t *)block->idstr,
                        strlen(block->idstr));
        size += 1 + strlen(block->idstr);
    }
    return size;
}

#define ENCODING_FLAG_XBZRLE 0x1
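
/*
 * save_xbzrle_page: send a page as an XBZRLE delta against its previously
 * cached version.
 *
 * Returns -1 on a cache miss (the page is inserted for next time unless
 * this is the last stage, and the caller falls back to a normal page),
 * 0 if the page turned out to be unmodified, or the number of bytes
 * written: the block header, one ENCODING_FLAG_XBZRLE byte, a be16
 * encoded length and the encoded data.  An encoding overflow also
 * returns -1 so the page gets sent uncompressed.
 */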
static int save_xbzrle_page(QEMUFile *f, uint8_t *current_data,
                            ram_addr_t current_addr, RAMBlock *block,
                            ram_addr_t offset, int cont, bool last_stage)
{
    int encoded_len = 0, bytes_sent = -1;
    uint8_t *prev_cached_page;

    if (!cache_is_cached(XBZRLE.cache, current_addr)) {
        if (!last_stage) {
            cache_insert(XBZRLE.cache, current_addr, current_data);
        }
        acct_info.xbzrle_cache_miss++;
        return -1;
    }

    prev_cached_page = get_cached_data(XBZRLE.cache, current_addr);

    /* save the current page content into the static buffer */
    memcpy(XBZRLE.current_buf, current_data, TARGET_PAGE_SIZE);

    /* XBZRLE encoding (if there is no overflow) */
    encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
                                       TARGET_PAGE_SIZE, XBZRLE.encoded_buf,
                                       TARGET_PAGE_SIZE);
    if (encoded_len == 0) {
        DPRINTF("Skipping unmodified page\n");
        return 0;
    } else if (encoded_len == -1) {
        DPRINTF("Overflow\n");
        acct_info.xbzrle_overflows++;
        /* update data in the cache */
        memcpy(prev_cached_page, current_data, TARGET_PAGE_SIZE);
        return -1;
    }

    /* update the cache, so that later deltas are encoded against the data
       that the destination now has */
    if (!last_stage) {
        memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
    }

    /* Send the XBZRLE-compressed page */
    bytes_sent = save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_XBZRLE);
    qemu_put_byte(f, ENCODING_FLAG_XBZRLE);
    qemu_put_be16(f, encoded_len);
    qemu_put_buffer(f, XBZRLE.encoded_buf, encoded_len);
    bytes_sent += encoded_len + 1 + 2;
    acct_info.xbzrle_pages++;
    acct_info.xbzrle_bytes += bytes_sent;

    return bytes_sent;
}


/* This is the last block that we have visited searching for dirty pages */
static RAMBlock *last_seen_block;
/* This is the last block from which we have sent data */
static RAMBlock *last_sent_block;
static ram_addr_t last_offset;
static unsigned long *migration_bitmap;
static uint64_t migration_dirty_pages;
static uint32_t last_version;
static bool ram_bulk_stage;
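
/*
 * The migration bitmap has one bit per target page of guest RAM, indexed
 * by ram_addr >> TARGET_PAGE_BITS across all RAMBlocks.
 *
 * migration_bitmap_find_and_reset_dirty() returns the offset, within the
 * given MemoryRegion, of the next dirty page at or after 'start' and
 * clears its bit; if no dirty page is left it returns an offset past the
 * end of the region.  During the bulk stage every page is still
 * considered dirty, so the bitmap scan is skipped and pages are simply
 * walked in order.
 */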
static inline
ram_addr_t migration_bitmap_find_and_reset_dirty(MemoryRegion *mr,
                                                 ram_addr_t start)
{
    unsigned long base = mr->ram_addr >> TARGET_PAGE_BITS;
    unsigned long nr = base + (start >> TARGET_PAGE_BITS);
    unsigned long size = base + (int128_get64(mr->size) >> TARGET_PAGE_BITS);

    unsigned long next;

    if (ram_bulk_stage && nr > base) {
        next = nr + 1;
    } else {
        next = find_next_bit(migration_bitmap, size, nr);
    }

    if (next < size) {
        clear_bit(next, migration_bitmap);
        migration_dirty_pages--;
    }
    return (next - base) << TARGET_PAGE_BITS;
}

static inline bool migration_bitmap_set_dirty(MemoryRegion *mr,
                                              ram_addr_t offset)
{
    bool ret;
    int nr = (mr->ram_addr + offset) >> TARGET_PAGE_BITS;

    ret = test_and_set_bit(nr, migration_bitmap);

    if (!ret) {
        migration_dirty_pages++;
    }
    return ret;
}

/* Needs iothread lock! */
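/*
 * migration_bitmap_sync: pull the dirty log of the memory API into the
 * migration bitmap.  Every page flagged DIRTY_MEMORY_MIGRATION since the
 * last sync is marked in migration_bitmap and counted in
 * migration_dirty_pages.  The pages dirtied per period are accumulated
 * and, once more than a second has passed, turned into dirty_pages_rate
 * and dirty_bytes_rate, which the migration core can use to judge whether
 * the guest dirties memory faster than we can transfer it.
 */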
static void migration_bitmap_sync(void)
{
    RAMBlock *block;
    ram_addr_t addr;
    uint64_t num_dirty_pages_init = migration_dirty_pages;
    MigrationState *s = migrate_get_current();
    static int64_t start_time;
    static int64_t num_dirty_pages_period;
    int64_t end_time;

    if (!start_time) {
        start_time = qemu_get_clock_ms(rt_clock);
    }

    trace_migration_bitmap_sync_start();
    memory_global_sync_dirty_bitmap(get_system_memory());

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        for (addr = 0; addr < block->length; addr += TARGET_PAGE_SIZE) {
            if (memory_region_test_and_clear_dirty(block->mr,
                                                   addr, TARGET_PAGE_SIZE,
                                                   DIRTY_MEMORY_MIGRATION)) {
                migration_bitmap_set_dirty(block->mr, addr);
            }
        }
    }
    trace_migration_bitmap_sync_end(migration_dirty_pages
                                    - num_dirty_pages_init);
    num_dirty_pages_period += migration_dirty_pages - num_dirty_pages_init;
    end_time = qemu_get_clock_ms(rt_clock);

    /* more than 1 second = 1000 milliseconds */
    if (end_time > start_time + 1000) {
        s->dirty_pages_rate = num_dirty_pages_period * 1000
            / (end_time - start_time);
        s->dirty_bytes_rate = s->dirty_pages_rate * TARGET_PAGE_SIZE;
        start_time = end_time;
        num_dirty_pages_period = 0;
    }
}

/*
 * ram_save_block: Writes at most one page of memory to the stream f
 *
 * Returns:  The number of bytes written.
 *           0 means no dirty pages
 */
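/*
 * The search resumes where the previous call stopped (last_seen_block and
 * last_offset) and walks the RAMBlock list round-robin; after a complete
 * round with nothing left to send it gives up and returns 0.  Zero pages
 * are sent as a single fill byte (or simply skipped during the bulk
 * stage), and once the bulk stage is over XBZRLE is tried before falling
 * back to a full copy of the page.
 */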
static int ram_save_block(QEMUFile *f, bool last_stage)
{
    RAMBlock *block = last_seen_block;
    ram_addr_t offset = last_offset;
    bool complete_round = false;
    int bytes_sent = 0;
    MemoryRegion *mr;
    ram_addr_t current_addr;

    if (!block)
        block = QTAILQ_FIRST(&ram_list.blocks);

    while (true) {
        mr = block->mr;
        offset = migration_bitmap_find_and_reset_dirty(mr, offset);
        if (complete_round && block == last_seen_block &&
            offset >= last_offset) {
            break;
        }
        if (offset >= block->length) {
            offset = 0;
            block = QTAILQ_NEXT(block, next);
            if (!block) {
                block = QTAILQ_FIRST(&ram_list.blocks);
                complete_round = true;
                ram_bulk_stage = false;
            }
        } else {
            uint8_t *p;
            int cont = (block == last_sent_block) ?
                RAM_SAVE_FLAG_CONTINUE : 0;

            p = memory_region_get_ram_ptr(mr) + offset;

            /* When in doubt, send the page as a normal page */
            bytes_sent = -1;
            if (is_zero_page(p)) {
                acct_info.dup_pages++;
                if (!ram_bulk_stage) {
                    bytes_sent = save_block_hdr(f, block, offset, cont,
                                                RAM_SAVE_FLAG_COMPRESS);
                    qemu_put_byte(f, 0);
                    bytes_sent++;
                } else {
                    acct_info.skipped_pages++;
                    bytes_sent = 0;
                }
            } else if (!ram_bulk_stage && migrate_use_xbzrle()) {
                current_addr = block->offset + offset;
                bytes_sent = save_xbzrle_page(f, p, current_addr, block,
                                              offset, cont, last_stage);
                if (!last_stage) {
                    p = get_cached_data(XBZRLE.cache, current_addr);
                }
            }

            /* XBZRLE overflow or normal page */
            if (bytes_sent == -1) {
                bytes_sent = save_block_hdr(f, block, offset, cont,
                                            RAM_SAVE_FLAG_PAGE);
                qemu_put_buffer_async(f, p, TARGET_PAGE_SIZE);
                bytes_sent += TARGET_PAGE_SIZE;
                acct_info.norm_pages++;
            }

            /* if a page was sent, stop here; if it was unmodified or
               skipped, continue with the next one */
            if (bytes_sent > 0) {
                last_sent_block = block;
                break;
            }
        }
    }
    last_seen_block = block;
    last_offset = offset;

    return bytes_sent;
}

static uint64_t bytes_transferred;

static ram_addr_t ram_save_remaining(void)
{
    return migration_dirty_pages;
}

uint64_t ram_bytes_remaining(void)
{
    return ram_save_remaining() * TARGET_PAGE_SIZE;
}

uint64_t ram_bytes_transferred(void)
{
    return bytes_transferred;
}

uint64_t ram_bytes_total(void)
{
    RAMBlock *block;
    uint64_t total = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next)
        total += block->length;

    return total;
}

static void migration_end(void)
{
    if (migration_bitmap) {
        memory_global_dirty_log_stop();
        g_free(migration_bitmap);
        migration_bitmap = NULL;
    }

    if (XBZRLE.cache) {
        cache_fini(XBZRLE.cache);
        g_free(XBZRLE.cache);
        g_free(XBZRLE.encoded_buf);
        g_free(XBZRLE.current_buf);
        g_free(XBZRLE.decoded_buf);
        XBZRLE.cache = NULL;
    }
}

static void ram_migration_cancel(void *opaque)
{
    migration_end();
}

static void reset_ram_globals(void)
{
    last_seen_block = NULL;
    last_sent_block = NULL;
    last_offset = 0;
    last_version = ram_list.version;
    ram_bulk_stage = true;
}

#define MAX_WAIT 50 /* ms, half buffered_file limit */
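
/*
 * ram_save_setup: first stage of RAM migration.
 *
 * All of guest RAM is marked dirty in a freshly allocated migration
 * bitmap, the XBZRLE cache is created if that capability is enabled, and
 * dirty logging is started.  The stream then receives the total RAM size
 * (tagged with RAM_SAVE_FLAG_MEM_SIZE) followed by the idstr and length
 * of every RAMBlock, which lets the destination verify that its RAM
 * layout matches before any page data arrives.
 */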
static int ram_save_setup(QEMUFile *f, void *opaque)
{
    RAMBlock *block;
    int64_t ram_pages = last_ram_offset() >> TARGET_PAGE_BITS;

    migration_bitmap = bitmap_new(ram_pages);
    bitmap_set(migration_bitmap, 0, ram_pages);
    migration_dirty_pages = ram_pages;

    if (migrate_use_xbzrle()) {
        XBZRLE.cache = cache_init(migrate_xbzrle_cache_size() /
                                  TARGET_PAGE_SIZE,
                                  TARGET_PAGE_SIZE);
        if (!XBZRLE.cache) {
            DPRINTF("Error creating cache\n");
            return -1;
        }
        XBZRLE.encoded_buf = g_malloc0(TARGET_PAGE_SIZE);
        XBZRLE.current_buf = g_malloc(TARGET_PAGE_SIZE);
        acct_clear();
    }

    qemu_mutex_lock_iothread();
    qemu_mutex_lock_ramlist();
    bytes_transferred = 0;
    reset_ram_globals();

    memory_global_dirty_log_start();
    migration_bitmap_sync();
    qemu_mutex_unlock_iothread();

    qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        qemu_put_byte(f, strlen(block->idstr));
        qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
        qemu_put_be64(f, block->length);
    }

    qemu_mutex_unlock_ramlist();
    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);

    return 0;
}
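
/*
 * ram_save_iterate: iterative stage, called repeatedly while the guest
 * keeps running.  Dirty pages are sent until qemu_file_rate_limit() says
 * the bandwidth budget is used up or no dirty page is left; every 64
 * iterations the elapsed time is checked so that a single call does not
 * hold the ramlist lock for much longer than MAX_WAIT milliseconds.
 */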
static int ram_save_iterate(QEMUFile *f, void *opaque)
{
    int ret;
    int i;
    int64_t t0;
    int total_sent = 0;

    qemu_mutex_lock_ramlist();

    if (ram_list.version != last_version) {
        reset_ram_globals();
    }

    t0 = qemu_get_clock_ns(rt_clock);
    i = 0;
    while ((ret = qemu_file_rate_limit(f)) == 0) {
        int bytes_sent;

        bytes_sent = ram_save_block(f, false);
        /* no more blocks to send */
        if (bytes_sent == 0) {
            break;
        }
        total_sent += bytes_sent;
        acct_info.iterations++;
        /* we want to check in the 1st loop, just in case it was the 1st time
           and we had to sync the dirty bitmap.
           qemu_get_clock_ns() is a bit expensive, so we only check every few
           iterations
        */
        if ((i & 63) == 0) {
            uint64_t t1 = (qemu_get_clock_ns(rt_clock) - t0) / 1000000;
            if (t1 > MAX_WAIT) {
                DPRINTF("big wait: %" PRIu64 " milliseconds, %d iterations\n",
                        t1, i);
                break;
            }
        }
        i++;
    }

    qemu_mutex_unlock_ramlist();

    if (ret < 0) {
        bytes_transferred += total_sent;
        return ret;
    }

    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
    total_sent += 8;
    bytes_transferred += total_sent;

    return total_sent;
}

static int ram_save_complete(QEMUFile *f, void *opaque)
{
    qemu_mutex_lock_ramlist();
    migration_bitmap_sync();

    /* try transferring iterative blocks of memory */

    /* flush all remaining blocks regardless of rate limiting */
    while (true) {
        int bytes_sent;

        bytes_sent = ram_save_block(f, true);
        /* no more blocks to send */
        if (bytes_sent == 0) {
            break;
        }
        bytes_transferred += bytes_sent;
    }
    migration_end();

    qemu_mutex_unlock_ramlist();
    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);

    return 0;
}

static uint64_t ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size)
{
    uint64_t remaining_size;

    remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;

    if (remaining_size < max_size) {
        qemu_mutex_lock_iothread();
        migration_bitmap_sync();
        qemu_mutex_unlock_iothread();
        remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;
    }
    return remaining_size;
}
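
/*
 * load_xbzrle: destination-side counterpart of save_xbzrle_page().
 * Reads the one-byte encoding flag and the be16 encoded length written
 * by the source, then decodes the delta on top of the current contents
 * of the host page.  Returns 0 on success, -1 on a malformed page.
 */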
static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
{
    int ret, rc = 0;
    unsigned int xh_len;
    int xh_flags;

    if (!XBZRLE.decoded_buf) {
        XBZRLE.decoded_buf = g_malloc(TARGET_PAGE_SIZE);
    }

    /* extract RLE header */
    xh_flags = qemu_get_byte(f);
    xh_len = qemu_get_be16(f);

    if (xh_flags != ENCODING_FLAG_XBZRLE) {
        fprintf(stderr, "Failed to load XBZRLE page - wrong compression!\n");
        return -1;
    }

    if (xh_len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "Failed to load XBZRLE page - len overflow!\n");
        return -1;
    }
    /* load data and decode */
    qemu_get_buffer(f, XBZRLE.decoded_buf, xh_len);

    /* decode RLE */
    ret = xbzrle_decode_buffer(XBZRLE.decoded_buf, xh_len, host,
                               TARGET_PAGE_SIZE);
    if (ret == -1) {
        fprintf(stderr, "Failed to load XBZRLE page - decode error!\n");
        rc = -1;
    } else if (ret > TARGET_PAGE_SIZE) {
        fprintf(stderr, "Failed to load XBZRLE page - size %d exceeds %d!\n",
                ret, TARGET_PAGE_SIZE);
        abort();
    }

    return rc;
}
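
/*
 * host_from_stream_offset: translate a (RAMBlock, offset) reference from
 * the stream into a host pointer.  The block is identified by its idstr
 * unless RAM_SAVE_FLAG_CONTINUE is set, in which case the block used for
 * the previous page is reused (which is why 'block' is static).  Returns
 * NULL if the block cannot be found, making the caller fail the incoming
 * migration.
 */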
static inline void *host_from_stream_offset(QEMUFile *f,
                                            ram_addr_t offset,
                                            int flags)
{
    static RAMBlock *block = NULL;
    char id[256];
    uint8_t len;

    if (flags & RAM_SAVE_FLAG_CONTINUE) {
        if (!block) {
            fprintf(stderr, "Ack, bad migration stream!\n");
            return NULL;
        }

        return memory_region_get_ram_ptr(block->mr) + offset;
    }

    len = qemu_get_byte(f);
    qemu_get_buffer(f, (uint8_t *)id, len);
    id[len] = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (!strncmp(id, block->idstr, sizeof(id)))
            return memory_region_get_ram_ptr(block->mr) + offset;
    }

    fprintf(stderr, "Can't find block %s!\n", id);
    return NULL;
}
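
/*
 * ram_load: parse the RAM section of an incoming migration stream
 * (version 4).  Each record starts with a be64 word whose low bits carry
 * the RAM_SAVE_FLAG_* bits and whose page-aligned part is the address:
 *   - MEM_SIZE: total RAM size, followed by the idstr/length pairs of all
 *     RAMBlocks, which are checked against the local RAM layout;
 *   - COMPRESS: a single fill byte for the whole page;
 *   - PAGE:     TARGET_PAGE_SIZE bytes of raw page data;
 *   - XBZRLE:   an encoded page, handled by load_xbzrle();
 *   - EOS:      end of this section.
 */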
static int ram_load(QEMUFile *f, void *opaque, int version_id)
{
    ram_addr_t addr;
    int flags, ret = 0;
    int error;
    static uint64_t seq_iter;

    seq_iter++;

    if (version_id < 4 || version_id > 4) {
        return -EINVAL;
    }

    do {
        addr = qemu_get_be64(f);

        flags = addr & ~TARGET_PAGE_MASK;
        addr &= TARGET_PAGE_MASK;

        if (flags & RAM_SAVE_FLAG_MEM_SIZE) {
            if (version_id == 4) {
                /* Synchronize RAM block list */
                char id[256];
                ram_addr_t length;
                ram_addr_t total_ram_bytes = addr;

                while (total_ram_bytes) {
                    RAMBlock *block;
                    uint8_t len;

                    len = qemu_get_byte(f);
                    qemu_get_buffer(f, (uint8_t *)id, len);
                    id[len] = 0;
                    length = qemu_get_be64(f);

                    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
                        if (!strncmp(id, block->idstr, sizeof(id))) {
                            if (block->length != length) {
                                ret = -EINVAL;
                                goto done;
                            }
                            break;
                        }
                    }

                    if (!block) {
                        fprintf(stderr, "Unknown ramblock \"%s\", cannot "
                                "accept migration\n", id);
                        ret = -EINVAL;
                        goto done;
                    }

                    total_ram_bytes -= length;
                }
            }
        }

        if (flags & RAM_SAVE_FLAG_COMPRESS) {
            void *host;
            uint8_t ch;

            host = host_from_stream_offset(f, addr, flags);
            if (!host) {
                return -EINVAL;
            }

            ch = qemu_get_byte(f);
            memset(host, ch, TARGET_PAGE_SIZE);
#ifndef _WIN32
            if (ch == 0 &&
                (!kvm_enabled() || kvm_has_sync_mmu()) &&
                getpagesize() <= TARGET_PAGE_SIZE) {
                qemu_madvise(host, TARGET_PAGE_SIZE, QEMU_MADV_DONTNEED);
            }
#endif
        } else if (flags & RAM_SAVE_FLAG_PAGE) {
            void *host;

            host = host_from_stream_offset(f, addr, flags);
            if (!host) {
                return -EINVAL;
            }

            qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
        } else if (flags & RAM_SAVE_FLAG_XBZRLE) {
            void *host = host_from_stream_offset(f, addr, flags);
            if (!host) {
                return -EINVAL;
            }

            if (load_xbzrle(f, addr, host) < 0) {
                ret = -EINVAL;
                goto done;
            }
        }
        error = qemu_file_get_error(f);
        if (error) {
            ret = error;
            goto done;
        }
    } while (!(flags & RAM_SAVE_FLAG_EOS));

done:
    DPRINTF("Completed load of VM with exit code %d seq iteration "
            "%" PRIu64 "\n", ret, seq_iter);
    return ret;
}

SaveVMHandlers savevm_ram_handlers = {
    .save_live_setup = ram_save_setup,
    .save_live_iterate = ram_save_iterate,
    .save_live_complete = ram_save_complete,
    .save_live_pending = ram_save_pending,
    .load_state = ram_load,
    .cancel = ram_migration_cancel,
};

#ifdef HAS_AUDIO
struct soundhw {
    const char *name;
    const char *descr;
    int enabled;
    int isa;
    union {
        int (*init_isa) (ISABus *bus);
        int (*init_pci) (PCIBus *bus);
    } init;
};

static struct soundhw soundhw[9];
static int soundhw_count;

void isa_register_soundhw(const char *name, const char *descr,
                          int (*init_isa)(ISABus *bus))
{
    assert(soundhw_count < ARRAY_SIZE(soundhw) - 1);
    soundhw[soundhw_count].name = name;
    soundhw[soundhw_count].descr = descr;
    soundhw[soundhw_count].isa = 1;
    soundhw[soundhw_count].init.init_isa = init_isa;
    soundhw_count++;
}

void pci_register_soundhw(const char *name, const char *descr,
                          int (*init_pci)(PCIBus *bus))
{
    assert(soundhw_count < ARRAY_SIZE(soundhw) - 1);
    soundhw[soundhw_count].name = name;
    soundhw[soundhw_count].descr = descr;
    soundhw[soundhw_count].isa = 0;
    soundhw[soundhw_count].init.init_pci = init_pci;
    soundhw_count++;
}
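
/*
 * select_soundhw: parse the -soundhw option.  A help value lists the
 * registered cards and exits, "all" enables every card, and otherwise
 * the argument is a comma-separated list of card names; an unknown name
 * prints the list of valid cards and exits with an error.
 */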
void select_soundhw(const char *optarg)
{
    struct soundhw *c;

    if (is_help_option(optarg)) {
    show_valid_cards:

        if (soundhw_count) {
             printf("Valid sound card names (comma separated):\n");
             for (c = soundhw; c->name; ++c) {
                 printf ("%-11s %s\n", c->name, c->descr);
             }
             printf("\n-soundhw all will enable all of the above\n");
        } else {
             printf("Machine has no user-selectable audio hardware "
                    "(it may or may not have always-present audio hardware).\n");
        }
        exit(!is_help_option(optarg));
    }
    else {
        size_t l;
        const char *p;
        char *e;
        int bad_card = 0;

        if (!strcmp(optarg, "all")) {
            for (c = soundhw; c->name; ++c) {
                c->enabled = 1;
            }
            return;
        }

        p = optarg;
        while (*p) {
            e = strchr(p, ',');
            l = !e ? strlen(p) : (size_t) (e - p);

            for (c = soundhw; c->name; ++c) {
                if (!strncmp(c->name, p, l) && !c->name[l]) {
                    c->enabled = 1;
                    break;
                }
            }

            if (!c->name) {
                if (l > 80) {
                    fprintf(stderr,
                            "Unknown sound card name (too big to show)\n");
                }
                else {
                    fprintf(stderr, "Unknown sound card name `%.*s'\n",
                            (int) l, p);
                }
                bad_card = 1;
            }
            p += l + (e != NULL);
        }

        if (bad_card) {
            goto show_valid_cards;
        }
    }
}

void audio_init(ISABus *isa_bus, PCIBus *pci_bus)
{
    struct soundhw *c;

    for (c = soundhw; c->name; ++c) {
        if (c->enabled) {
            if (c->isa) {
                if (isa_bus) {
                    c->init.init_isa(isa_bus);
                }
            } else {
                if (pci_bus) {
                    c->init.init_pci(pci_bus);
                }
            }
        }
    }
}
#else
void select_soundhw(const char *optarg)
{
}
void audio_init(ISABus *isa_bus, PCIBus *pci_bus)
{
}
#endif
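
/*
 * qemu_uuid_parse: parse a UUID given in its canonical 36-character
 * textual form (the 8-4-4-4-12 hex layout expected by UUID_FMT, e.g.
 * "550e8400-e29b-41d4-a716-446655440000") into a 16-byte buffer.  On
 * TARGET_I386 the parsed UUID is also stored in the SMBIOS type 1 table.
 * Returns 0 on success, -1 on a malformed string.
 */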
int qemu_uuid_parse(const char *str, uint8_t *uuid)
{
    int ret;

    if (strlen(str) != 36) {
        return -1;
    }

    ret = sscanf(str, UUID_FMT, &uuid[0], &uuid[1], &uuid[2], &uuid[3],
                 &uuid[4], &uuid[5], &uuid[6], &uuid[7], &uuid[8], &uuid[9],
                 &uuid[10], &uuid[11], &uuid[12], &uuid[13], &uuid[14],
                 &uuid[15]);

    if (ret != 16) {
        return -1;
    }
#ifdef TARGET_I386
    smbios_add_field(1, offsetof(struct smbios_type_1, uuid), 16, uuid);
#endif
    return 0;
}

void do_acpitable_option(const QemuOpts *opts)
{
#ifdef TARGET_I386
    Error *err = NULL;

    acpi_table_add(opts, &err);
    if (err) {
        fprintf(stderr, "Wrong acpi table provided: %s\n",
                error_get_pretty(err));
        error_free(err);
        exit(1);
    }
#endif
}

void do_smbios_option(const char *optarg)
{
#ifdef TARGET_I386
    if (smbios_entry_add(optarg) < 0) {
        fprintf(stderr, "Wrong smbios provided\n");
        exit(1);
    }
#endif
}

void cpudef_init(void)
{
#if defined(cpudef_setup)
    cpudef_setup(); /* parse cpu definitions in target config file */
#endif
}

int audio_available(void)
{
#ifdef HAS_AUDIO
    return 1;
#else
    return 0;
#endif
}

int tcg_available(void)
{
    return 1;
}

int kvm_available(void)
{
#ifdef CONFIG_KVM
    return 1;
#else
    return 0;
#endif
}

int xen_available(void)
{
#ifdef CONFIG_XEN
    return 1;
#else
    return 0;
#endif
}


TargetInfo *qmp_query_target(Error **errp)
{
    TargetInfo *info = g_malloc0(sizeof(*info));

    info->arch = TARGET_TYPE;

    return info;
}