/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include <stdint.h>
#include <stdarg.h>
#include <stdlib.h>
#ifndef _WIN32
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include "config.h"
#include "monitor/monitor.h"
#include "sysemu/sysemu.h"
#include "qemu/bitops.h"
#include "qemu/bitmap.h"
#include "sysemu/arch_init.h"
#include "audio/audio.h"
#include "hw/pc.h"
#include "hw/pci/pci.h"
#include "hw/audiodev.h"
#include "sysemu/kvm.h"
#include "migration/migration.h"
#include "exec/gdbstub.h"
#include "hw/smbios.h"
#include "exec/address-spaces.h"
#include "hw/pcspk.h"
#include "migration/page_cache.h"
#include "qemu/config-file.h"
#include "qmp-commands.h"
#include "trace.h"
#include "exec/cpu-all.h"

#ifdef DEBUG_ARCH_INIT
#define DPRINTF(fmt, ...) \
    do { fprintf(stdout, "arch_init: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#ifdef TARGET_SPARC
int graphic_width = 1024;
int graphic_height = 768;
int graphic_depth = 8;
#else
int graphic_width = 800;
int graphic_height = 600;
int graphic_depth = 15;
#endif


#if defined(TARGET_ALPHA)
#define QEMU_ARCH QEMU_ARCH_ALPHA
#elif defined(TARGET_ARM)
#define QEMU_ARCH QEMU_ARCH_ARM
#elif defined(TARGET_CRIS)
#define QEMU_ARCH QEMU_ARCH_CRIS
#elif defined(TARGET_I386)
#define QEMU_ARCH QEMU_ARCH_I386
#elif defined(TARGET_M68K)
#define QEMU_ARCH QEMU_ARCH_M68K
#elif defined(TARGET_LM32)
#define QEMU_ARCH QEMU_ARCH_LM32
#elif defined(TARGET_MICROBLAZE)
#define QEMU_ARCH QEMU_ARCH_MICROBLAZE
#elif defined(TARGET_MIPS)
#define QEMU_ARCH QEMU_ARCH_MIPS
#elif defined(TARGET_MOXIE)
#define QEMU_ARCH QEMU_ARCH_MOXIE
#elif defined(TARGET_OPENRISC)
#define QEMU_ARCH QEMU_ARCH_OPENRISC
#elif defined(TARGET_PPC)
#define QEMU_ARCH QEMU_ARCH_PPC
#elif defined(TARGET_S390X)
#define QEMU_ARCH QEMU_ARCH_S390X
#elif defined(TARGET_SH4)
#define QEMU_ARCH QEMU_ARCH_SH4
#elif defined(TARGET_SPARC)
#define QEMU_ARCH QEMU_ARCH_SPARC
#elif defined(TARGET_XTENSA)
#define QEMU_ARCH QEMU_ARCH_XTENSA
#elif defined(TARGET_UNICORE32)
#define QEMU_ARCH QEMU_ARCH_UNICORE32
#endif

const uint32_t arch_type = QEMU_ARCH;

/***********************************************************/
/* ram save/restore */

#define RAM_SAVE_FLAG_FULL     0x01 /* Obsolete, not used anymore */
#define RAM_SAVE_FLAG_COMPRESS 0x02
#define RAM_SAVE_FLAG_MEM_SIZE 0x04
#define RAM_SAVE_FLAG_PAGE     0x08
#define RAM_SAVE_FLAG_EOS      0x10
#define RAM_SAVE_FLAG_CONTINUE 0x20
#define RAM_SAVE_FLAG_XBZRLE   0x40
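/*
 * Editor's note (a sketch, not part of the original source): these flag bits
 * travel in the low bits of the page-aligned RAM offset that save_block_hdr()
 * writes as a single big-endian 64-bit word, which is why ram_load() below
 * can split the two halves apart again:
 *
 *     uint64_t header = qemu_get_be64(f);
 *     int flags       = header & ~TARGET_PAGE_MASK;
 *     ram_addr_t addr = header & TARGET_PAGE_MASK;
 *
 * Page alignment guarantees the low TARGET_PAGE_BITS bits of the offset are
 * zero, so the flag bits defined above never collide with the address.
 */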


static struct defconfig_file {
    const char *filename;
    /* Indicates it is a user config file (disabled by -no-user-config) */
    bool userconfig;
} default_config_files[] = {
    { CONFIG_QEMU_CONFDIR "/qemu.conf",                   true },
    { CONFIG_QEMU_CONFDIR "/target-" TARGET_ARCH ".conf", true },
    { NULL }, /* end of list */
};


int qemu_read_default_config_files(bool userconfig)
{
    int ret;
    struct defconfig_file *f;

    for (f = default_config_files; f->filename; f++) {
        if (!userconfig && f->userconfig) {
            continue;
        }
        ret = qemu_read_config_file(f->filename);
        if (ret < 0 && ret != -ENOENT) {
            return ret;
        }
    }

    return 0;
}
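/*
 * Editor's note (illustrative sketch, not in the original file): a missing
 * default config file is not an error here, since -ENOENT is deliberately
 * ignored, so a caller only has to act on real parse or I/O failures.
 * A hypothetical caller might do:
 *
 *     if (qemu_read_default_config_files(userconfig) < 0) {
 *         exit(1);
 *     }
 */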

static inline bool is_zero_page(uint8_t *p)
{
    return buffer_find_nonzero_offset(p, TARGET_PAGE_SIZE) ==
        TARGET_PAGE_SIZE;
}

/* struct containing the XBZRLE cache and a static page
   used by the compression */
static struct {
    /* buffer used for XBZRLE encoding */
    uint8_t *encoded_buf;
    /* buffer for storing page content */
    uint8_t *current_buf;
    /* buffer used for XBZRLE decoding */
    uint8_t *decoded_buf;
    /* Cache for XBZRLE */
    PageCache *cache;
} XBZRLE = {
    .encoded_buf = NULL,
    .current_buf = NULL,
    .decoded_buf = NULL,
    .cache = NULL,
};


int64_t xbzrle_cache_resize(int64_t new_size)
{
    if (XBZRLE.cache != NULL) {
        return cache_resize(XBZRLE.cache, new_size / TARGET_PAGE_SIZE) *
            TARGET_PAGE_SIZE;
    }
    return pow2floor(new_size);
}
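/*
 * Editor's note (usage sketch, not part of the original code): the return
 * value is the cache size actually adopted, in bytes.  It can be smaller
 * than the request because the size is expressed in whole pages and rounded
 * down to a power of two (pow2floor() above; cache_resize() is assumed to
 * round in a similar way):
 *
 *     int64_t granted = xbzrle_cache_resize(64 * 1024 * 1024);
 *     assert(granted <= 64 * 1024 * 1024);
 */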

/* accounting for migration statistics */
typedef struct AccountingInfo {
    uint64_t dup_pages;
    uint64_t skipped_pages;
    uint64_t norm_pages;
    uint64_t iterations;
    uint64_t xbzrle_bytes;
    uint64_t xbzrle_pages;
    uint64_t xbzrle_cache_miss;
    uint64_t xbzrle_overflows;
} AccountingInfo;

static AccountingInfo acct_info;

static void acct_clear(void)
{
    memset(&acct_info, 0, sizeof(acct_info));
}

uint64_t dup_mig_bytes_transferred(void)
{
    return acct_info.dup_pages * TARGET_PAGE_SIZE;
}

uint64_t dup_mig_pages_transferred(void)
{
    return acct_info.dup_pages;
}

uint64_t skipped_mig_bytes_transferred(void)
{
    return acct_info.skipped_pages * TARGET_PAGE_SIZE;
}

uint64_t skipped_mig_pages_transferred(void)
{
    return acct_info.skipped_pages;
}

uint64_t norm_mig_bytes_transferred(void)
{
    return acct_info.norm_pages * TARGET_PAGE_SIZE;
}

uint64_t norm_mig_pages_transferred(void)
{
    return acct_info.norm_pages;
}

uint64_t xbzrle_mig_bytes_transferred(void)
{
    return acct_info.xbzrle_bytes;
}

uint64_t xbzrle_mig_pages_transferred(void)
{
    return acct_info.xbzrle_pages;
}

uint64_t xbzrle_mig_pages_cache_miss(void)
{
    return acct_info.xbzrle_cache_miss;
}

uint64_t xbzrle_mig_pages_overflow(void)
{
    return acct_info.xbzrle_overflows;
}

static size_t save_block_hdr(QEMUFile *f, RAMBlock *block, ram_addr_t offset,
                             int cont, int flag)
{
    size_t size;

    qemu_put_be64(f, offset | cont | flag);
    size = 8;

    if (!cont) {
        qemu_put_byte(f, strlen(block->idstr));
        qemu_put_buffer(f, (uint8_t *)block->idstr,
                        strlen(block->idstr));
        size += 1 + strlen(block->idstr);
    }
    return size;
}
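/*
 * Editor's note (wire-format sketch derived from the code above, not an
 * authoritative spec): every page record starts with this header,
 *
 *     8 bytes   big-endian (offset | cont | flag)
 *     1 byte    strlen(block->idstr)     - only when cont == 0
 *     n bytes   block->idstr, no NUL     - only when cont == 0
 *
 * and the function returns exactly the number of bytes it wrote, so callers
 * can account for bandwidth.  RAM_SAVE_FLAG_CONTINUE ("cont") means "same
 * RAMBlock as the previous record", which is what lets the idstr be omitted
 * on consecutive pages of one block.
 */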

#define ENCODING_FLAG_XBZRLE 0x1

static int save_xbzrle_page(QEMUFile *f, uint8_t *current_data,
                            ram_addr_t current_addr, RAMBlock *block,
                            ram_addr_t offset, int cont, bool last_stage)
{
    int encoded_len = 0, bytes_sent = -1;
    uint8_t *prev_cached_page;

    if (!cache_is_cached(XBZRLE.cache, current_addr)) {
        if (!last_stage) {
            cache_insert(XBZRLE.cache, current_addr, current_data);
        }
        acct_info.xbzrle_cache_miss++;
        return -1;
    }

    prev_cached_page = get_cached_data(XBZRLE.cache, current_addr);

    /* save current buffer into memory */
    memcpy(XBZRLE.current_buf, current_data, TARGET_PAGE_SIZE);

    /* XBZRLE encoding (if there is no overflow) */
    encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
                                       TARGET_PAGE_SIZE, XBZRLE.encoded_buf,
                                       TARGET_PAGE_SIZE);
    if (encoded_len == 0) {
        DPRINTF("Skipping unmodified page\n");
        return 0;
    } else if (encoded_len == -1) {
        DPRINTF("Overflow\n");
        acct_info.xbzrle_overflows++;
        /* update data in the cache */
        memcpy(prev_cached_page, current_data, TARGET_PAGE_SIZE);
        return -1;
    }

    /* Update the cache so that the next delta is computed against the same
       data the destination now has. */
    if (!last_stage) {
        memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
    }

    /* Send XBZRLE based compressed page */
    bytes_sent = save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_XBZRLE);
    qemu_put_byte(f, ENCODING_FLAG_XBZRLE);
    qemu_put_be16(f, encoded_len);
    qemu_put_buffer(f, XBZRLE.encoded_buf, encoded_len);
    bytes_sent += encoded_len + 1 + 2;
    acct_info.xbzrle_pages++;
    acct_info.xbzrle_bytes += bytes_sent;

    return bytes_sent;
}
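/*
 * Editor's note (summary sketch, not authoritative): the XBZRLE record that
 * goes on the wire is
 *
 *     block header (see save_block_hdr) with RAM_SAVE_FLAG_XBZRLE set
 *     1 byte   ENCODING_FLAG_XBZRLE
 *     2 bytes  big-endian encoded length
 *     n bytes  xbzrle-encoded delta against the cached copy of the page
 *
 * and the return value encodes the caller contract: -1 means "fall back to
 * sending the full page" (cache miss or encoding overflow), 0 means the page
 * was identical to the cached copy and nothing was sent, and a positive
 * value is the number of bytes written.
 */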


/* This is the last block that we have visited searching for dirty pages */
static RAMBlock *last_seen_block;
/* This is the last block from where we have sent data */
static RAMBlock *last_sent_block;
static ram_addr_t last_offset;
static unsigned long *migration_bitmap;
static uint64_t migration_dirty_pages;
static uint32_t last_version;
static bool ram_bulk_stage;

static inline
ram_addr_t migration_bitmap_find_and_reset_dirty(MemoryRegion *mr,
                                                 ram_addr_t start)
{
    unsigned long base = mr->ram_addr >> TARGET_PAGE_BITS;
    unsigned long nr = base + (start >> TARGET_PAGE_BITS);
    unsigned long size = base + (int128_get64(mr->size) >> TARGET_PAGE_BITS);

    unsigned long next;

    if (ram_bulk_stage && nr > base) {
        next = nr + 1;
    } else {
        next = find_next_bit(migration_bitmap, size, nr);
    }

    if (next < size) {
        clear_bit(next, migration_bitmap);
        migration_dirty_pages--;
    }
    return (next - base) << TARGET_PAGE_BITS;
}
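/*
 * Editor's note (worked example under an assumed TARGET_PAGE_BITS of 12,
 * i.e. 4 KiB pages; not part of the original code): for a block whose
 * mr->ram_addr is 0x100000, base is 0x100.  A search starting at offset
 * 0x3000 begins at bit 0x103 of migration_bitmap.  During the bulk stage
 * every page is still dirty, so the scan is skipped and the next page is
 * taken directly; afterwards find_next_bit() locates the next dirty page.
 * The return value is relative to the block, e.g. bit 0x105 maps back to
 * offset (0x105 - 0x100) << 12 = 0x5000.
 */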

static inline bool migration_bitmap_set_dirty(MemoryRegion *mr,
                                              ram_addr_t offset)
{
    bool ret;
    int nr = (mr->ram_addr + offset) >> TARGET_PAGE_BITS;

    ret = test_and_set_bit(nr, migration_bitmap);

    if (!ret) {
        migration_dirty_pages++;
    }
    return ret;
}

/* Needs iothread lock! */

static void migration_bitmap_sync(void)
{
    RAMBlock *block;
    ram_addr_t addr;
    uint64_t num_dirty_pages_init = migration_dirty_pages;
    MigrationState *s = migrate_get_current();
    static int64_t start_time;
    static int64_t num_dirty_pages_period;
    int64_t end_time;

    if (!start_time) {
        start_time = qemu_get_clock_ms(rt_clock);
    }

    trace_migration_bitmap_sync_start();
    memory_global_sync_dirty_bitmap(get_system_memory());

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        for (addr = 0; addr < block->length; addr += TARGET_PAGE_SIZE) {
            if (memory_region_test_and_clear_dirty(block->mr,
                                                   addr, TARGET_PAGE_SIZE,
                                                   DIRTY_MEMORY_MIGRATION)) {
                migration_bitmap_set_dirty(block->mr, addr);
            }
        }
    }
    trace_migration_bitmap_sync_end(migration_dirty_pages
                                    - num_dirty_pages_init);
    num_dirty_pages_period += migration_dirty_pages - num_dirty_pages_init;
    end_time = qemu_get_clock_ms(rt_clock);

    /* more than 1 second = 1000 milliseconds */
    if (end_time > start_time + 1000) {
        s->dirty_pages_rate = num_dirty_pages_period * 1000
            / (end_time - start_time);
        s->dirty_bytes_rate = s->dirty_pages_rate * TARGET_PAGE_SIZE;
        start_time = end_time;
        num_dirty_pages_period = 0;
    }
}
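/*
 * Editor's note (worked example, not part of the original code): the rate is
 * only refreshed once at least a second has passed.  If 12000 pages were
 * newly dirtied over a 2000 ms window, dirty_pages_rate becomes
 * 12000 * 1000 / 2000 = 6000 pages/s, and dirty_bytes_rate is that times
 * TARGET_PAGE_SIZE (roughly 24 MB/s with 4 KiB pages).  These rates are
 * meant to let the migration core judge whether the guest's dirtying rate
 * can be outrun by the available bandwidth.
 */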

/*
 * ram_save_block: Writes a page of memory to the stream f
 *
 * Returns:  The number of bytes written.
 *           0 means no dirty pages
 */

static int ram_save_block(QEMUFile *f, bool last_stage)
{
    RAMBlock *block = last_seen_block;
    ram_addr_t offset = last_offset;
    bool complete_round = false;
    int bytes_sent = 0;
    MemoryRegion *mr;
    ram_addr_t current_addr;

    if (!block)
        block = QTAILQ_FIRST(&ram_list.blocks);

    while (true) {
        mr = block->mr;
        offset = migration_bitmap_find_and_reset_dirty(mr, offset);
        if (complete_round && block == last_seen_block &&
            offset >= last_offset) {
            break;
        }
        if (offset >= block->length) {
            offset = 0;
            block = QTAILQ_NEXT(block, next);
            if (!block) {
                block = QTAILQ_FIRST(&ram_list.blocks);
                complete_round = true;
                ram_bulk_stage = false;
            }
        } else {
            uint8_t *p;
            int cont = (block == last_sent_block) ?
                RAM_SAVE_FLAG_CONTINUE : 0;

            p = memory_region_get_ram_ptr(mr) + offset;

            /* When in doubt, send the page as a normal page */
            bytes_sent = -1;
            if (is_zero_page(p)) {
                acct_info.dup_pages++;
                if (!ram_bulk_stage) {
                    bytes_sent = save_block_hdr(f, block, offset, cont,
                                                RAM_SAVE_FLAG_COMPRESS);
                    qemu_put_byte(f, 0);
                    bytes_sent++;
                } else {
                    acct_info.skipped_pages++;
                    bytes_sent = 0;
                }
            } else if (!ram_bulk_stage && migrate_use_xbzrle()) {
                current_addr = block->offset + offset;
                bytes_sent = save_xbzrle_page(f, p, current_addr, block,
                                              offset, cont, last_stage);
                if (!last_stage) {
                    p = get_cached_data(XBZRLE.cache, current_addr);
                }
            }

            /* XBZRLE overflow or normal page */
            if (bytes_sent == -1) {
                bytes_sent = save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_PAGE);
                qemu_put_buffer(f, p, TARGET_PAGE_SIZE);
                bytes_sent += TARGET_PAGE_SIZE;
                acct_info.norm_pages++;
            }

            /* if page is unmodified, continue to the next */
            if (bytes_sent > 0) {
                last_sent_block = block;
                break;
            }
        }
    }
    last_seen_block = block;
    last_offset = offset;

    return bytes_sent;
}
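/*
 * Editor's note (decision summary inferred from the code above, not an
 * authoritative table):
 *
 *     zero page, bulk stage      ->  nothing sent, counted as skipped
 *     zero page, later rounds    ->  header + 1 fill byte (FLAG_COMPRESS)
 *     XBZRLE enabled, not bulk   ->  delta record, or fall back to full page
 *     anything else              ->  header + TARGET_PAGE_SIZE raw bytes
 *
 * The function keeps walking the dirty bitmap until it has actually written
 * something (bytes_sent > 0) or a complete pass over all blocks found no
 * dirty page left, in which case it returns 0.
 */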

static uint64_t bytes_transferred;

static ram_addr_t ram_save_remaining(void)
{
    return migration_dirty_pages;
}

uint64_t ram_bytes_remaining(void)
{
    return ram_save_remaining() * TARGET_PAGE_SIZE;
}

uint64_t ram_bytes_transferred(void)
{
    return bytes_transferred;
}

uint64_t ram_bytes_total(void)
{
    RAMBlock *block;
    uint64_t total = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next)
        total += block->length;

    return total;
}

static void migration_end(void)
{
    if (migration_bitmap) {
        memory_global_dirty_log_stop();
        g_free(migration_bitmap);
        migration_bitmap = NULL;
    }

    if (XBZRLE.cache) {
        cache_fini(XBZRLE.cache);
        g_free(XBZRLE.cache);
        g_free(XBZRLE.encoded_buf);
        g_free(XBZRLE.current_buf);
        g_free(XBZRLE.decoded_buf);
        XBZRLE.cache = NULL;
    }
}

static void ram_migration_cancel(void *opaque)
{
    migration_end();
}

static void reset_ram_globals(void)
{
    last_seen_block = NULL;
    last_sent_block = NULL;
    last_offset = 0;
    last_version = ram_list.version;
    ram_bulk_stage = true;
}

#define MAX_WAIT 50 /* ms, half buffered_file limit */

static int ram_save_setup(QEMUFile *f, void *opaque)
{
    RAMBlock *block;
    int64_t ram_pages = last_ram_offset() >> TARGET_PAGE_BITS;

    migration_bitmap = bitmap_new(ram_pages);
    bitmap_set(migration_bitmap, 0, ram_pages);
    migration_dirty_pages = ram_pages;

    if (migrate_use_xbzrle()) {
        XBZRLE.cache = cache_init(migrate_xbzrle_cache_size() /
                                  TARGET_PAGE_SIZE,
                                  TARGET_PAGE_SIZE);
        if (!XBZRLE.cache) {
            DPRINTF("Error creating cache\n");
            return -1;
        }
        XBZRLE.encoded_buf = g_malloc0(TARGET_PAGE_SIZE);
        XBZRLE.current_buf = g_malloc(TARGET_PAGE_SIZE);
        acct_clear();
    }

    qemu_mutex_lock_iothread();
    qemu_mutex_lock_ramlist();
    bytes_transferred = 0;
    reset_ram_globals();

    memory_global_dirty_log_start();
    migration_bitmap_sync();
    qemu_mutex_unlock_iothread();

    qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        qemu_put_byte(f, strlen(block->idstr));
        qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
        qemu_put_be64(f, block->length);
    }

    qemu_mutex_unlock_ramlist();
    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);

    return 0;
}

static int ram_save_iterate(QEMUFile *f, void *opaque)
{
    int ret;
    int i;
    int64_t t0;
    int total_sent = 0;

    qemu_mutex_lock_ramlist();

    if (ram_list.version != last_version) {
        reset_ram_globals();
    }

    t0 = qemu_get_clock_ns(rt_clock);
    i = 0;
    while ((ret = qemu_file_rate_limit(f)) == 0) {
        int bytes_sent;

        bytes_sent = ram_save_block(f, false);
        /* no more blocks to send */
        if (bytes_sent == 0) {
            break;
        }
        total_sent += bytes_sent;
        acct_info.iterations++;
        /* We want to check in the 1st loop, just in case it was the 1st time
           and we had to sync the dirty bitmap.
           qemu_get_clock_ns() is a bit expensive, so we only check once
           every few iterations. */
        if ((i & 63) == 0) {
            uint64_t t1 = (qemu_get_clock_ns(rt_clock) - t0) / 1000000;
            if (t1 > MAX_WAIT) {
                DPRINTF("big wait: %" PRIu64 " milliseconds, %d iterations\n",
                        t1, i);
                break;
            }
        }
        i++;
    }

    qemu_mutex_unlock_ramlist();

    if (ret < 0) {
        bytes_transferred += total_sent;
        return ret;
    }

    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
    total_sent += 8;
    bytes_transferred += total_sent;

    return total_sent;
}

static int ram_save_complete(QEMUFile *f, void *opaque)
{
    qemu_mutex_lock_ramlist();
    migration_bitmap_sync();

    /* try transferring iterative blocks of memory */

    /* flush all remaining blocks regardless of rate limiting */
    while (true) {
        int bytes_sent;

        bytes_sent = ram_save_block(f, true);
        /* no more blocks to send */
        if (bytes_sent == 0) {
            break;
        }
        bytes_transferred += bytes_sent;
    }
    migration_end();

    qemu_mutex_unlock_ramlist();
    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);

    return 0;
}

static uint64_t ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size)
{
    uint64_t remaining_size;

    remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;

    if (remaining_size < max_size) {
        qemu_mutex_lock_iothread();
        migration_bitmap_sync();
        qemu_mutex_unlock_iothread();
        remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;
    }
    return remaining_size;
}
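/*
 * Editor's note (worked example, not part of the original code): max_size is
 * the amount of data the migration core believes it can still send within
 * the allowed downtime.  Assuming 4 KiB pages, 5000 remaining dirty pages
 * report roughly 20 MB; if max_size is 30 MB, the bitmap is re-synced under
 * the iothread lock first, so the "can we finish now?" decision is made on
 * fresh dirty-page information rather than on a stale count.
 */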

static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
{
    int ret, rc = 0;
    unsigned int xh_len;
    int xh_flags;

    if (!XBZRLE.decoded_buf) {
        XBZRLE.decoded_buf = g_malloc(TARGET_PAGE_SIZE);
    }

    /* extract RLE header */
    xh_flags = qemu_get_byte(f);
    xh_len = qemu_get_be16(f);

    if (xh_flags != ENCODING_FLAG_XBZRLE) {
        fprintf(stderr, "Failed to load XBZRLE page - wrong compression!\n");
        return -1;
    }

    if (xh_len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "Failed to load XBZRLE page - len overflow!\n");
        return -1;
    }
    /* load data and decode */
    qemu_get_buffer(f, XBZRLE.decoded_buf, xh_len);

    /* decode RLE */
    ret = xbzrle_decode_buffer(XBZRLE.decoded_buf, xh_len, host,
                               TARGET_PAGE_SIZE);
    if (ret == -1) {
        fprintf(stderr, "Failed to load XBZRLE page - decode error!\n");
        rc = -1;
    } else if (ret > TARGET_PAGE_SIZE) {
        fprintf(stderr, "Failed to load XBZRLE page - size %d exceeds %d!\n",
                ret, TARGET_PAGE_SIZE);
        abort();
    }

    return rc;
}

static inline void *host_from_stream_offset(QEMUFile *f,
                                            ram_addr_t offset,
                                            int flags)
{
    static RAMBlock *block = NULL;
    char id[256];
    uint8_t len;

    if (flags & RAM_SAVE_FLAG_CONTINUE) {
        if (!block) {
            fprintf(stderr, "Ack, bad migration stream!\n");
            return NULL;
        }

        return memory_region_get_ram_ptr(block->mr) + offset;
    }

    len = qemu_get_byte(f);
    qemu_get_buffer(f, (uint8_t *)id, len);
    id[len] = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (!strncmp(id, block->idstr, sizeof(id)))
            return memory_region_get_ram_ptr(block->mr) + offset;
    }

    fprintf(stderr, "Can't find block %s!\n", id);
    return NULL;
}

static int ram_load(QEMUFile *f, void *opaque, int version_id)
{
    ram_addr_t addr;
    int flags, ret = 0;
    int error;
    static uint64_t seq_iter;

    seq_iter++;

    if (version_id < 4 || version_id > 4) {
        return -EINVAL;
    }

    do {
        addr = qemu_get_be64(f);

        flags = addr & ~TARGET_PAGE_MASK;
        addr &= TARGET_PAGE_MASK;

        if (flags & RAM_SAVE_FLAG_MEM_SIZE) {
            if (version_id == 4) {
                /* Synchronize RAM block list */
                char id[256];
                ram_addr_t length;
                ram_addr_t total_ram_bytes = addr;

                while (total_ram_bytes) {
                    RAMBlock *block;
                    uint8_t len;

                    len = qemu_get_byte(f);
                    qemu_get_buffer(f, (uint8_t *)id, len);
                    id[len] = 0;
                    length = qemu_get_be64(f);

                    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
                        if (!strncmp(id, block->idstr, sizeof(id))) {
                            if (block->length != length) {
                                ret = -EINVAL;
                                goto done;
                            }
                            break;
                        }
                    }

                    if (!block) {
                        fprintf(stderr, "Unknown ramblock \"%s\", cannot "
                                "accept migration\n", id);
                        ret = -EINVAL;
                        goto done;
                    }

                    total_ram_bytes -= length;
                }
            }
        }

        if (flags & RAM_SAVE_FLAG_COMPRESS) {
            void *host;
            uint8_t ch;

            host = host_from_stream_offset(f, addr, flags);
            if (!host) {
                return -EINVAL;
            }

            ch = qemu_get_byte(f);
            memset(host, ch, TARGET_PAGE_SIZE);
#ifndef _WIN32
            if (ch == 0 &&
                (!kvm_enabled() || kvm_has_sync_mmu()) &&
                getpagesize() <= TARGET_PAGE_SIZE) {
                qemu_madvise(host, TARGET_PAGE_SIZE, QEMU_MADV_DONTNEED);
            }
#endif
        } else if (flags & RAM_SAVE_FLAG_PAGE) {
            void *host;

            host = host_from_stream_offset(f, addr, flags);
            if (!host) {
                return -EINVAL;
            }

            qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
        } else if (flags & RAM_SAVE_FLAG_XBZRLE) {
            void *host = host_from_stream_offset(f, addr, flags);
            if (!host) {
                return -EINVAL;
            }

            if (load_xbzrle(f, addr, host) < 0) {
                ret = -EINVAL;
                goto done;
            }
        }
        error = qemu_file_get_error(f);
        if (error) {
            ret = error;
            goto done;
        }
    } while (!(flags & RAM_SAVE_FLAG_EOS));

done:
    DPRINTF("Completed load of VM with exit code %d seq iteration "
            "%" PRIu64 "\n", ret, seq_iter);
    return ret;
}
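/*
 * Editor's note (stream summary inferred from ram_load() above, not an
 * authoritative grammar): every record begins with a be64 whose low bits
 * carry the RAM_SAVE_FLAG_* bits and whose page-aligned remainder is the
 * offset (or, for MEM_SIZE, the total RAM size).  The payload then depends
 * on the flag: MEM_SIZE carries the RAMBlock list (idstr length, idstr,
 * be64 length per block), COMPRESS carries a single fill byte, PAGE carries
 * TARGET_PAGE_SIZE raw bytes, XBZRLE carries the header and delta parsed by
 * load_xbzrle(), and EOS terminates the section.
 */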

SaveVMHandlers savevm_ram_handlers = {
    .save_live_setup = ram_save_setup,
    .save_live_iterate = ram_save_iterate,
    .save_live_complete = ram_save_complete,
    .save_live_pending = ram_save_pending,
    .load_state = ram_load,
    .cancel = ram_migration_cancel,
};
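/*
 * Editor's note (expected call order, a sketch rather than a guarantee from
 * this file): the generic migration code calls save_live_setup once, then
 * alternates save_live_pending and save_live_iterate while the reported
 * backlog is larger than what fits in the allowed downtime, calls
 * save_live_complete for the final stop-and-copy pass, and cancel if the
 * migration is aborted.  load_state runs on the destination side.
 */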

#ifdef HAS_AUDIO
struct soundhw {
    const char *name;
    const char *descr;
    int enabled;
    int isa;
    union {
        int (*init_isa) (ISABus *bus);
        int (*init_pci) (PCIBus *bus);
    } init;
};

static struct soundhw soundhw[] = {
#ifdef HAS_AUDIO_CHOICE
#ifdef CONFIG_PCSPK
    {
        "pcspk",
        "PC speaker",
        0,
        1,
        { .init_isa = pcspk_audio_init }
    },
#endif

#ifdef CONFIG_SB16
    {
        "sb16",
        "Creative Sound Blaster 16",
        0,
        1,
        { .init_isa = SB16_init }
    },
#endif

#ifdef CONFIG_CS4231A
    {
        "cs4231a",
        "CS4231A",
        0,
        1,
        { .init_isa = cs4231a_init }
    },
#endif

#ifdef CONFIG_ADLIB
    {
        "adlib",
#ifdef HAS_YMF262
        "Yamaha YMF262 (OPL3)",
#else
        "Yamaha YM3812 (OPL2)",
#endif
        0,
        1,
        { .init_isa = Adlib_init }
    },
#endif

#ifdef CONFIG_GUS
    {
        "gus",
        "Gravis Ultrasound GF1",
        0,
        1,
        { .init_isa = GUS_init }
    },
#endif

#ifdef CONFIG_AC97
    {
        "ac97",
        "Intel 82801AA AC97 Audio",
        0,
        0,
        { .init_pci = ac97_init }
    },
#endif

#ifdef CONFIG_ES1370
    {
        "es1370",
        "ENSONIQ AudioPCI ES1370",
        0,
        0,
        { .init_pci = es1370_init }
    },
#endif

#ifdef CONFIG_HDA
    {
        "hda",
        "Intel HD Audio",
        0,
        0,
        { .init_pci = intel_hda_and_codec_init }
    },
#endif

#endif /* HAS_AUDIO_CHOICE */

    { NULL, NULL, 0, 0, { NULL } }
};

void select_soundhw(const char *optarg)
{
    struct soundhw *c;

    if (is_help_option(optarg)) {
    show_valid_cards:

#ifdef HAS_AUDIO_CHOICE
        printf("Valid sound card names (comma separated):\n");
        for (c = soundhw; c->name; ++c) {
            printf("%-11s %s\n", c->name, c->descr);
        }
        printf("\n-soundhw all will enable all of the above\n");
#else
        printf("Machine has no user-selectable audio hardware "
               "(it may or may not have always-present audio hardware).\n");
#endif
        exit(!is_help_option(optarg));
    } else {
        size_t l;
        const char *p;
        char *e;
        int bad_card = 0;

        if (!strcmp(optarg, "all")) {
            for (c = soundhw; c->name; ++c) {
                c->enabled = 1;
            }
            return;
        }

        p = optarg;
        while (*p) {
            e = strchr(p, ',');
            l = !e ? strlen(p) : (size_t) (e - p);

            for (c = soundhw; c->name; ++c) {
                if (!strncmp(c->name, p, l) && !c->name[l]) {
                    c->enabled = 1;
                    break;
                }
            }

            if (!c->name) {
                if (l > 80) {
                    fprintf(stderr,
                            "Unknown sound card name (too big to show)\n");
                } else {
                    fprintf(stderr, "Unknown sound card name `%.*s'\n",
                            (int) l, p);
                }
                bad_card = 1;
            }
            p += l + (e != NULL);
        }

        if (bad_card) {
            goto show_valid_cards;
        }
    }
}
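/*
 * Editor's note (usage sketch, not part of the original code): this parses
 * the -soundhw option, so the accepted forms are a comma-separated list of
 * the names in the table above, the special word "all", or a help request
 * (whatever is_help_option() accepts, typically "help" or "?"), e.g.:
 *
 *     -soundhw sb16,adlib
 *     -soundhw ac97
 *     -soundhw all
 */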

void audio_init(ISABus *isa_bus, PCIBus *pci_bus)
{
    struct soundhw *c;

    for (c = soundhw; c->name; ++c) {
        if (c->enabled) {
            if (c->isa) {
                if (isa_bus) {
                    c->init.init_isa(isa_bus);
                }
            } else {
                if (pci_bus) {
                    c->init.init_pci(pci_bus);
                }
            }
        }
    }
}
#else
void select_soundhw(const char *optarg)
{
}
void audio_init(ISABus *isa_bus, PCIBus *pci_bus)
{
}
#endif

int qemu_uuid_parse(const char *str, uint8_t *uuid)
{
    int ret;

    if (strlen(str) != 36) {
        return -1;
    }

    ret = sscanf(str, UUID_FMT, &uuid[0], &uuid[1], &uuid[2], &uuid[3],
                 &uuid[4], &uuid[5], &uuid[6], &uuid[7], &uuid[8], &uuid[9],
                 &uuid[10], &uuid[11], &uuid[12], &uuid[13], &uuid[14],
                 &uuid[15]);

    if (ret != 16) {
        return -1;
    }
#ifdef TARGET_I386
    smbios_add_field(1, offsetof(struct smbios_type_1, uuid), 16, uuid);
#endif
    return 0;
}
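/*
 * Editor's note (usage sketch, not part of the original code): the input
 * must be the canonical 36-character UUID text form that UUID_FMT scans,
 * and the 16 parsed bytes land in the caller's buffer:
 *
 *     uint8_t uuid[16];
 *     if (qemu_uuid_parse("550e8400-e29b-41d4-a716-446655440000", uuid) < 0) {
 *         fprintf(stderr, "invalid UUID\n");
 *     }
 *
 * On x86 targets the parsed UUID is also pushed into the SMBIOS type 1
 * table as a side effect.
 */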

void do_acpitable_option(const char *optarg)
{
#ifdef TARGET_I386
    if (acpi_table_add(optarg) < 0) {
        fprintf(stderr, "Wrong acpi table provided\n");
        exit(1);
    }
#endif
}

void do_smbios_option(const char *optarg)
{
#ifdef TARGET_I386
    if (smbios_entry_add(optarg) < 0) {
        fprintf(stderr, "Wrong smbios provided\n");
        exit(1);
    }
#endif
}

void cpudef_init(void)
{
#if defined(cpudef_setup)
    cpudef_setup(); /* parse cpu definitions in target config file */
#endif
}

int audio_available(void)
{
#ifdef HAS_AUDIO
    return 1;
#else
    return 0;
#endif
}

int tcg_available(void)
{
    return 1;
}

int kvm_available(void)
{
#ifdef CONFIG_KVM
    return 1;
#else
    return 0;
#endif
}

int xen_available(void)
{
#ifdef CONFIG_XEN
    return 1;
#else
    return 0;
#endif
}


TargetInfo *qmp_query_target(Error **errp)
{
    TargetInfo *info = g_malloc0(sizeof(*info));

    info->arch = TARGET_TYPE;

    return info;
}