root / arch_init.c @ b2a8658e


1 ad96090a Blue Swirl
/*
2 ad96090a Blue Swirl
 * QEMU System Emulator
3 ad96090a Blue Swirl
 *
4 ad96090a Blue Swirl
 * Copyright (c) 2003-2008 Fabrice Bellard
5 ad96090a Blue Swirl
 *
6 ad96090a Blue Swirl
 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 ad96090a Blue Swirl
 * of this software and associated documentation files (the "Software"), to deal
8 ad96090a Blue Swirl
 * in the Software without restriction, including without limitation the rights
9 ad96090a Blue Swirl
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 ad96090a Blue Swirl
 * copies of the Software, and to permit persons to whom the Software is
11 ad96090a Blue Swirl
 * furnished to do so, subject to the following conditions:
12 ad96090a Blue Swirl
 *
13 ad96090a Blue Swirl
 * The above copyright notice and this permission notice shall be included in
14 ad96090a Blue Swirl
 * all copies or substantial portions of the Software.
15 ad96090a Blue Swirl
 *
16 ad96090a Blue Swirl
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 ad96090a Blue Swirl
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 ad96090a Blue Swirl
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 ad96090a Blue Swirl
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 ad96090a Blue Swirl
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 ad96090a Blue Swirl
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 ad96090a Blue Swirl
 * THE SOFTWARE.
23 ad96090a Blue Swirl
 */
24 ad96090a Blue Swirl
#include <stdint.h>
25 ad96090a Blue Swirl
#include <stdarg.h>
26 b2e0a138 Michael S. Tsirkin
#include <stdlib.h>
27 ad96090a Blue Swirl
#ifndef _WIN32
28 1c47cb16 Blue Swirl
#include <sys/types.h>
29 ad96090a Blue Swirl
#include <sys/mman.h>
30 ad96090a Blue Swirl
#endif
31 ad96090a Blue Swirl
#include "config.h"
32 83c9089e Paolo Bonzini
#include "monitor/monitor.h"
33 9c17d615 Paolo Bonzini
#include "sysemu/sysemu.h"
34 1de7afc9 Paolo Bonzini
#include "qemu/bitops.h"
35 1de7afc9 Paolo Bonzini
#include "qemu/bitmap.h"
36 9c17d615 Paolo Bonzini
#include "sysemu/arch_init.h"
37 ad96090a Blue Swirl
#include "audio/audio.h"
38 ad96090a Blue Swirl
#include "hw/pc.h"
39 a2cb15b0 Michael S. Tsirkin
#include "hw/pci/pci.h"
40 ad96090a Blue Swirl
#include "hw/audiodev.h"
41 9c17d615 Paolo Bonzini
#include "sysemu/kvm.h"
42 caf71f86 Paolo Bonzini
#include "migration/migration.h"
43 022c62cb Paolo Bonzini
#include "exec/gdbstub.h"
44 ad96090a Blue Swirl
#include "hw/smbios.h"
45 022c62cb Paolo Bonzini
#include "exec/address-spaces.h"
46 302fe51b Jan Kiszka
#include "hw/pcspk.h"
47 caf71f86 Paolo Bonzini
#include "migration/page_cache.h"
48 1de7afc9 Paolo Bonzini
#include "qemu/config-file.h"
49 99afc91d Daniel P. Berrange
#include "qmp-commands.h"
50 3c12193d Juan Quintela
#include "trace.h"
51 0d6d3c87 Paolo Bonzini
#include "exec/cpu-all.h"
52 ad96090a Blue Swirl
53 3a697f69 Orit Wasserman
#ifdef DEBUG_ARCH_INIT
54 3a697f69 Orit Wasserman
#define DPRINTF(fmt, ...) \
55 3a697f69 Orit Wasserman
    do { fprintf(stdout, "arch_init: " fmt, ## __VA_ARGS__); } while (0)
56 3a697f69 Orit Wasserman
#else
57 3a697f69 Orit Wasserman
#define DPRINTF(fmt, ...) \
58 3a697f69 Orit Wasserman
    do { } while (0)
59 3a697f69 Orit Wasserman
#endif
60 3a697f69 Orit Wasserman
61 ad96090a Blue Swirl
#ifdef TARGET_SPARC
62 ad96090a Blue Swirl
int graphic_width = 1024;
63 ad96090a Blue Swirl
int graphic_height = 768;
64 ad96090a Blue Swirl
int graphic_depth = 8;
65 ad96090a Blue Swirl
#else
66 ad96090a Blue Swirl
int graphic_width = 800;
67 ad96090a Blue Swirl
int graphic_height = 600;
68 ad96090a Blue Swirl
int graphic_depth = 15;
69 ad96090a Blue Swirl
#endif
70 ad96090a Blue Swirl
71 ad96090a Blue Swirl
72 ad96090a Blue Swirl
#if defined(TARGET_ALPHA)
73 ad96090a Blue Swirl
#define QEMU_ARCH QEMU_ARCH_ALPHA
74 ad96090a Blue Swirl
#elif defined(TARGET_ARM)
75 ad96090a Blue Swirl
#define QEMU_ARCH QEMU_ARCH_ARM
76 ad96090a Blue Swirl
#elif defined(TARGET_CRIS)
77 ad96090a Blue Swirl
#define QEMU_ARCH QEMU_ARCH_CRIS
78 ad96090a Blue Swirl
#elif defined(TARGET_I386)
79 ad96090a Blue Swirl
#define QEMU_ARCH QEMU_ARCH_I386
80 ad96090a Blue Swirl
#elif defined(TARGET_M68K)
81 ad96090a Blue Swirl
#define QEMU_ARCH QEMU_ARCH_M68K
82 81ea0e13 Michael Walle
#elif defined(TARGET_LM32)
83 81ea0e13 Michael Walle
#define QEMU_ARCH QEMU_ARCH_LM32
84 ad96090a Blue Swirl
#elif defined(TARGET_MICROBLAZE)
85 ad96090a Blue Swirl
#define QEMU_ARCH QEMU_ARCH_MICROBLAZE
86 ad96090a Blue Swirl
#elif defined(TARGET_MIPS)
87 ad96090a Blue Swirl
#define QEMU_ARCH QEMU_ARCH_MIPS
88 e67db06e Jia Liu
#elif defined(TARGET_OPENRISC)
89 e67db06e Jia Liu
#define QEMU_ARCH QEMU_ARCH_OPENRISC
90 ad96090a Blue Swirl
#elif defined(TARGET_PPC)
91 ad96090a Blue Swirl
#define QEMU_ARCH QEMU_ARCH_PPC
92 ad96090a Blue Swirl
#elif defined(TARGET_S390X)
93 ad96090a Blue Swirl
#define QEMU_ARCH QEMU_ARCH_S390X
94 ad96090a Blue Swirl
#elif defined(TARGET_SH4)
95 ad96090a Blue Swirl
#define QEMU_ARCH QEMU_ARCH_SH4
96 ad96090a Blue Swirl
#elif defined(TARGET_SPARC)
97 ad96090a Blue Swirl
#define QEMU_ARCH QEMU_ARCH_SPARC
98 2328826b Max Filippov
#elif defined(TARGET_XTENSA)
99 2328826b Max Filippov
#define QEMU_ARCH QEMU_ARCH_XTENSA
100 4f23a1e6 Guan Xuetao
#elif defined(TARGET_UNICORE32)
101 4f23a1e6 Guan Xuetao
#define QEMU_ARCH QEMU_ARCH_UNICORE32
102 ad96090a Blue Swirl
#endif
103 ad96090a Blue Swirl
104 ad96090a Blue Swirl
const uint32_t arch_type = QEMU_ARCH;
105 ad96090a Blue Swirl
106 ad96090a Blue Swirl
/***********************************************************/
107 ad96090a Blue Swirl
/* ram save/restore */
108 ad96090a Blue Swirl
109 d20878d2 Yoshiaki Tamura
#define RAM_SAVE_FLAG_FULL     0x01 /* Obsolete, not used anymore */
110 d20878d2 Yoshiaki Tamura
#define RAM_SAVE_FLAG_COMPRESS 0x02
111 d20878d2 Yoshiaki Tamura
#define RAM_SAVE_FLAG_MEM_SIZE 0x04
112 d20878d2 Yoshiaki Tamura
#define RAM_SAVE_FLAG_PAGE     0x08
113 d20878d2 Yoshiaki Tamura
#define RAM_SAVE_FLAG_EOS      0x10
114 d20878d2 Yoshiaki Tamura
#define RAM_SAVE_FLAG_CONTINUE 0x20
115 17ad9b35 Orit Wasserman
#define RAM_SAVE_FLAG_XBZRLE   0x40
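/* The RAM_SAVE_FLAG_* values live in the low, sub-page bits of the be64
   address word written by save_block_hdr(); ram_load() recovers them with
   (addr & ~TARGET_PAGE_MASK) and the page address with TARGET_PAGE_MASK. */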
116 ad96090a Blue Swirl
117 86003615 Paolo Bonzini
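/* Vector primitives used by is_dup_page(): VECTYPE is the widest unit we
   compare at once, SPLAT(p) broadcasts the byte *p across a vector, and
   ALL_EQ(v1, v2) tests whether two vectors match in every lane.  Altivec
   and SSE2 variants are provided, with a plain unsigned long fallback. */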
#ifdef __ALTIVEC__
118 86003615 Paolo Bonzini
#include <altivec.h>
119 86003615 Paolo Bonzini
#define VECTYPE        vector unsigned char
120 86003615 Paolo Bonzini
#define SPLAT(p)       vec_splat(vec_ld(0, p), 0)
121 86003615 Paolo Bonzini
#define ALL_EQ(v1, v2) vec_all_eq(v1, v2)
122 f283edc4 Andreas Färber
/* altivec.h may redefine the bool macro as vector type.
123 f283edc4 Andreas Färber
 * Reset it to POSIX semantics. */
124 f283edc4 Andreas Färber
#undef bool
125 f283edc4 Andreas Färber
#define bool _Bool
126 86003615 Paolo Bonzini
#elif defined __SSE2__
127 86003615 Paolo Bonzini
#include <emmintrin.h>
128 86003615 Paolo Bonzini
#define VECTYPE        __m128i
129 86003615 Paolo Bonzini
#define SPLAT(p)       _mm_set1_epi8(*(p))
130 86003615 Paolo Bonzini
#define ALL_EQ(v1, v2) (_mm_movemask_epi8(_mm_cmpeq_epi8(v1, v2)) == 0xFFFF)
131 86003615 Paolo Bonzini
#else
132 86003615 Paolo Bonzini
#define VECTYPE        unsigned long
133 86003615 Paolo Bonzini
#define SPLAT(p)       (*(p) * (~0UL / 255))
134 86003615 Paolo Bonzini
#define ALL_EQ(v1, v2) ((v1) == (v2))
135 86003615 Paolo Bonzini
#endif
136 86003615 Paolo Bonzini
137 b5a8fe5e Eduardo Habkost
138 756557de Eduardo Habkost
static struct defconfig_file {
139 756557de Eduardo Habkost
    const char *filename;
140 f29a5614 Eduardo Habkost
    /* Indicates it is a user config file (disabled by -no-user-config) */
141 f29a5614 Eduardo Habkost
    bool userconfig;
142 756557de Eduardo Habkost
} default_config_files[] = {
143 f29a5614 Eduardo Habkost
    { CONFIG_QEMU_CONFDIR "/qemu.conf",                   true },
144 f29a5614 Eduardo Habkost
    { CONFIG_QEMU_CONFDIR "/target-" TARGET_ARCH ".conf", true },
145 756557de Eduardo Habkost
    { NULL }, /* end of list */
146 756557de Eduardo Habkost
};
147 756557de Eduardo Habkost
148 756557de Eduardo Habkost
149 f29a5614 Eduardo Habkost
int qemu_read_default_config_files(bool userconfig)
150 b5a8fe5e Eduardo Habkost
{
151 b5a8fe5e Eduardo Habkost
    int ret;
152 756557de Eduardo Habkost
    struct defconfig_file *f;
153 b5a8fe5e Eduardo Habkost
154 756557de Eduardo Habkost
    for (f = default_config_files; f->filename; f++) {
155 f29a5614 Eduardo Habkost
        if (!userconfig && f->userconfig) {
156 f29a5614 Eduardo Habkost
            continue;
157 f29a5614 Eduardo Habkost
        }
158 756557de Eduardo Habkost
        ret = qemu_read_config_file(f->filename);
159 756557de Eduardo Habkost
        if (ret < 0 && ret != -ENOENT) {
160 756557de Eduardo Habkost
            return ret;
161 756557de Eduardo Habkost
        }
162 b5a8fe5e Eduardo Habkost
    }
163 756557de Eduardo Habkost
    
164 b5a8fe5e Eduardo Habkost
    return 0;
165 b5a8fe5e Eduardo Habkost
}
166 b5a8fe5e Eduardo Habkost
167 86003615 Paolo Bonzini
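/* Returns 1 if the whole page consists of a single repeated byte (e.g. a
   zero page).  Such pages are sent as RAM_SAVE_FLAG_COMPRESS with just that
   one byte instead of TARGET_PAGE_SIZE bytes. */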
static int is_dup_page(uint8_t *page)
168 ad96090a Blue Swirl
{
169 86003615 Paolo Bonzini
    VECTYPE *p = (VECTYPE *)page;
170 86003615 Paolo Bonzini
    VECTYPE val = SPLAT(page);
171 ad96090a Blue Swirl
    int i;
172 ad96090a Blue Swirl
173 86003615 Paolo Bonzini
    for (i = 0; i < TARGET_PAGE_SIZE / sizeof(VECTYPE); i++) {
174 86003615 Paolo Bonzini
        if (!ALL_EQ(val, p[i])) {
175 ad96090a Blue Swirl
            return 0;
176 ad96090a Blue Swirl
        }
177 ad96090a Blue Swirl
    }
178 ad96090a Blue Swirl
179 ad96090a Blue Swirl
    return 1;
180 ad96090a Blue Swirl
}
181 ad96090a Blue Swirl
182 17ad9b35 Orit Wasserman
/* struct contains XBZRLE cache and a static page
183 17ad9b35 Orit Wasserman
   used by the compression */
184 17ad9b35 Orit Wasserman
static struct {
185 17ad9b35 Orit Wasserman
    /* buffer used for XBZRLE encoding */
186 17ad9b35 Orit Wasserman
    uint8_t *encoded_buf;
187 17ad9b35 Orit Wasserman
    /* buffer for storing page content */
188 17ad9b35 Orit Wasserman
    uint8_t *current_buf;
189 17ad9b35 Orit Wasserman
    /* buffer used for XBZRLE decoding */
190 17ad9b35 Orit Wasserman
    uint8_t *decoded_buf;
191 17ad9b35 Orit Wasserman
    /* Cache for XBZRLE */
192 17ad9b35 Orit Wasserman
    PageCache *cache;
193 17ad9b35 Orit Wasserman
} XBZRLE = {
194 17ad9b35 Orit Wasserman
    .encoded_buf = NULL,
195 17ad9b35 Orit Wasserman
    .current_buf = NULL,
196 17ad9b35 Orit Wasserman
    .decoded_buf = NULL,
197 17ad9b35 Orit Wasserman
    .cache = NULL,
198 17ad9b35 Orit Wasserman
};
199 17ad9b35 Orit Wasserman
200 9e1ba4cc Orit Wasserman
201 9e1ba4cc Orit Wasserman
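/* Resize the XBZRLE page cache.  new_size is given in bytes; it is converted
   to a number of pages for cache_resize() and the effective size is returned
   in bytes.  Before the cache has been created we can only report the
   power-of-two floor of the request. */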
int64_t xbzrle_cache_resize(int64_t new_size)
202 9e1ba4cc Orit Wasserman
{
203 9e1ba4cc Orit Wasserman
    if (XBZRLE.cache != NULL) {
204 9e1ba4cc Orit Wasserman
        return cache_resize(XBZRLE.cache, new_size / TARGET_PAGE_SIZE) *
205 9e1ba4cc Orit Wasserman
            TARGET_PAGE_SIZE;
206 9e1ba4cc Orit Wasserman
    }
207 9e1ba4cc Orit Wasserman
    return pow2floor(new_size);
208 9e1ba4cc Orit Wasserman
}
209 9e1ba4cc Orit Wasserman
210 004d4c10 Orit Wasserman
/* accounting for migration statistics */
211 004d4c10 Orit Wasserman
typedef struct AccountingInfo {
212 004d4c10 Orit Wasserman
    uint64_t dup_pages;
213 004d4c10 Orit Wasserman
    uint64_t norm_pages;
214 004d4c10 Orit Wasserman
    uint64_t iterations;
215 f36d55af Orit Wasserman
    uint64_t xbzrle_bytes;
216 f36d55af Orit Wasserman
    uint64_t xbzrle_pages;
217 f36d55af Orit Wasserman
    uint64_t xbzrle_cache_miss;
218 f36d55af Orit Wasserman
    uint64_t xbzrle_overflows;
219 004d4c10 Orit Wasserman
} AccountingInfo;
220 004d4c10 Orit Wasserman
221 004d4c10 Orit Wasserman
static AccountingInfo acct_info;
222 004d4c10 Orit Wasserman
223 004d4c10 Orit Wasserman
static void acct_clear(void)
224 004d4c10 Orit Wasserman
{
225 004d4c10 Orit Wasserman
    memset(&acct_info, 0, sizeof(acct_info));
226 004d4c10 Orit Wasserman
}
227 004d4c10 Orit Wasserman
228 004d4c10 Orit Wasserman
uint64_t dup_mig_bytes_transferred(void)
229 004d4c10 Orit Wasserman
{
230 004d4c10 Orit Wasserman
    return acct_info.dup_pages * TARGET_PAGE_SIZE;
231 004d4c10 Orit Wasserman
}
232 004d4c10 Orit Wasserman
233 004d4c10 Orit Wasserman
uint64_t dup_mig_pages_transferred(void)
234 004d4c10 Orit Wasserman
{
235 004d4c10 Orit Wasserman
    return acct_info.dup_pages;
236 004d4c10 Orit Wasserman
}
237 004d4c10 Orit Wasserman
238 004d4c10 Orit Wasserman
uint64_t norm_mig_bytes_transferred(void)
239 004d4c10 Orit Wasserman
{
240 004d4c10 Orit Wasserman
    return acct_info.norm_pages * TARGET_PAGE_SIZE;
241 004d4c10 Orit Wasserman
}
242 004d4c10 Orit Wasserman
243 004d4c10 Orit Wasserman
uint64_t norm_mig_pages_transferred(void)
244 004d4c10 Orit Wasserman
{
245 004d4c10 Orit Wasserman
    return acct_info.norm_pages;
246 004d4c10 Orit Wasserman
}
247 004d4c10 Orit Wasserman
248 f36d55af Orit Wasserman
uint64_t xbzrle_mig_bytes_transferred(void)
249 f36d55af Orit Wasserman
{
250 f36d55af Orit Wasserman
    return acct_info.xbzrle_bytes;
251 f36d55af Orit Wasserman
}
252 f36d55af Orit Wasserman
253 f36d55af Orit Wasserman
uint64_t xbzrle_mig_pages_transferred(void)
254 f36d55af Orit Wasserman
{
255 f36d55af Orit Wasserman
    return acct_info.xbzrle_pages;
256 f36d55af Orit Wasserman
}
257 f36d55af Orit Wasserman
258 f36d55af Orit Wasserman
uint64_t xbzrle_mig_pages_cache_miss(void)
259 f36d55af Orit Wasserman
{
260 f36d55af Orit Wasserman
    return acct_info.xbzrle_cache_miss;
261 f36d55af Orit Wasserman
}
262 f36d55af Orit Wasserman
263 f36d55af Orit Wasserman
uint64_t xbzrle_mig_pages_overflow(void)
264 f36d55af Orit Wasserman
{
265 f36d55af Orit Wasserman
    return acct_info.xbzrle_overflows;
266 f36d55af Orit Wasserman
}
267 f36d55af Orit Wasserman
268 0c51f43d Orit Wasserman
static void save_block_hdr(QEMUFile *f, RAMBlock *block, ram_addr_t offset,
269 0c51f43d Orit Wasserman
        int cont, int flag)
270 0c51f43d Orit Wasserman
{
271 0c51f43d Orit Wasserman
        qemu_put_be64(f, offset | cont | flag);
272 0c51f43d Orit Wasserman
        if (!cont) {
273 0c51f43d Orit Wasserman
                qemu_put_byte(f, strlen(block->idstr));
274 0c51f43d Orit Wasserman
                qemu_put_buffer(f, (uint8_t *)block->idstr,
275 0c51f43d Orit Wasserman
                                strlen(block->idstr));
276 0c51f43d Orit Wasserman
        }
277 0c51f43d Orit Wasserman
278 0c51f43d Orit Wasserman
}
279 0c51f43d Orit Wasserman
280 17ad9b35 Orit Wasserman
#define ENCODING_FLAG_XBZRLE 0x1
281 17ad9b35 Orit Wasserman
282 17ad9b35 Orit Wasserman
static int save_xbzrle_page(QEMUFile *f, uint8_t *current_data,
283 17ad9b35 Orit Wasserman
                            ram_addr_t current_addr, RAMBlock *block,
284 dd051c72 Juan Quintela
                            ram_addr_t offset, int cont, bool last_stage)
285 17ad9b35 Orit Wasserman
{
286 17ad9b35 Orit Wasserman
    int encoded_len = 0, bytes_sent = -1;
287 17ad9b35 Orit Wasserman
    uint8_t *prev_cached_page;
288 17ad9b35 Orit Wasserman
289 17ad9b35 Orit Wasserman
    if (!cache_is_cached(XBZRLE.cache, current_addr)) {
290 dd051c72 Juan Quintela
        if (!last_stage) {
291 dd051c72 Juan Quintela
            cache_insert(XBZRLE.cache, current_addr,
292 dd051c72 Juan Quintela
                         g_memdup(current_data, TARGET_PAGE_SIZE));
293 dd051c72 Juan Quintela
        }
294 f36d55af Orit Wasserman
        acct_info.xbzrle_cache_miss++;
295 17ad9b35 Orit Wasserman
        return -1;
296 17ad9b35 Orit Wasserman
    }
297 17ad9b35 Orit Wasserman
298 17ad9b35 Orit Wasserman
    prev_cached_page = get_cached_data(XBZRLE.cache, current_addr);
299 17ad9b35 Orit Wasserman
300 17ad9b35 Orit Wasserman
    /* save the current page into XBZRLE.current_buf */
301 17ad9b35 Orit Wasserman
    memcpy(XBZRLE.current_buf, current_data, TARGET_PAGE_SIZE);
302 17ad9b35 Orit Wasserman
303 17ad9b35 Orit Wasserman
    /* XBZRLE encoding (if there is no overflow) */
304 17ad9b35 Orit Wasserman
    encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
305 17ad9b35 Orit Wasserman
                                       TARGET_PAGE_SIZE, XBZRLE.encoded_buf,
306 17ad9b35 Orit Wasserman
                                       TARGET_PAGE_SIZE);
307 17ad9b35 Orit Wasserman
    if (encoded_len == 0) {
308 17ad9b35 Orit Wasserman
        DPRINTF("Skipping unmodified page\n");
309 17ad9b35 Orit Wasserman
        return 0;
310 17ad9b35 Orit Wasserman
    } else if (encoded_len == -1) {
311 17ad9b35 Orit Wasserman
        DPRINTF("Overflow\n");
312 f36d55af Orit Wasserman
        acct_info.xbzrle_overflows++;
313 17ad9b35 Orit Wasserman
        /* update data in the cache */
314 17ad9b35 Orit Wasserman
        memcpy(prev_cached_page, current_data, TARGET_PAGE_SIZE);
315 17ad9b35 Orit Wasserman
        return -1;
316 17ad9b35 Orit Wasserman
    }
317 17ad9b35 Orit Wasserman
318 17ad9b35 Orit Wasserman
    /* we need to update the data in the cache, in order to get the same data */
319 dd051c72 Juan Quintela
    if (!last_stage) {
320 dd051c72 Juan Quintela
        memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
321 dd051c72 Juan Quintela
    }
322 17ad9b35 Orit Wasserman
323 17ad9b35 Orit Wasserman
    /* Send XBZRLE based compressed page */
324 17ad9b35 Orit Wasserman
    save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_XBZRLE);
325 17ad9b35 Orit Wasserman
    qemu_put_byte(f, ENCODING_FLAG_XBZRLE);
326 17ad9b35 Orit Wasserman
    qemu_put_be16(f, encoded_len);
327 17ad9b35 Orit Wasserman
    qemu_put_buffer(f, XBZRLE.encoded_buf, encoded_len);
328 17ad9b35 Orit Wasserman
    bytes_sent = encoded_len + 1 + 2;
329 f36d55af Orit Wasserman
    acct_info.xbzrle_pages++;
330 f36d55af Orit Wasserman
    acct_info.xbzrle_bytes += bytes_sent;
331 17ad9b35 Orit Wasserman
332 17ad9b35 Orit Wasserman
    return bytes_sent;
333 17ad9b35 Orit Wasserman
}
334 17ad9b35 Orit Wasserman
335 760e77ea Alex Williamson
static RAMBlock *last_block;
336 760e77ea Alex Williamson
static ram_addr_t last_offset;
337 c6bf8e0e Juan Quintela
static unsigned long *migration_bitmap;
338 c6bf8e0e Juan Quintela
static uint64_t migration_dirty_pages;
339 f798b07f Umesh Deshpande
static uint32_t last_version;
340 760e77ea Alex Williamson
341 69268cde Juan Quintela
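/* The migration bitmap holds one bit per target page of guest RAM, indexed
   by ram_addr.  These helpers flip individual bits and keep
   migration_dirty_pages in step with the number of bits that are set. */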
static inline bool migration_bitmap_test_and_reset_dirty(MemoryRegion *mr,
342 69268cde Juan Quintela
                                                         ram_addr_t offset)
343 69268cde Juan Quintela
{
344 c6bf8e0e Juan Quintela
    bool ret;
345 c6bf8e0e Juan Quintela
    int nr = (mr->ram_addr + offset) >> TARGET_PAGE_BITS;
346 c6bf8e0e Juan Quintela
347 c6bf8e0e Juan Quintela
    ret = test_and_clear_bit(nr, migration_bitmap);
348 69268cde Juan Quintela
349 69268cde Juan Quintela
    if (ret) {
350 c6bf8e0e Juan Quintela
        migration_dirty_pages--;
351 69268cde Juan Quintela
    }
352 69268cde Juan Quintela
    return ret;
353 69268cde Juan Quintela
}
354 69268cde Juan Quintela
355 c6bf8e0e Juan Quintela
static inline bool migration_bitmap_set_dirty(MemoryRegion *mr,
356 c6bf8e0e Juan Quintela
                                              ram_addr_t offset)
357 e44d26c8 Juan Quintela
{
358 c6bf8e0e Juan Quintela
    bool ret;
359 c6bf8e0e Juan Quintela
    int nr = (mr->ram_addr + offset) >> TARGET_PAGE_BITS;
360 e44d26c8 Juan Quintela
361 c6bf8e0e Juan Quintela
    ret = test_and_set_bit(nr, migration_bitmap);
362 c6bf8e0e Juan Quintela
363 c6bf8e0e Juan Quintela
    if (!ret) {
364 c6bf8e0e Juan Quintela
        migration_dirty_pages++;
365 e44d26c8 Juan Quintela
    }
366 c6bf8e0e Juan Quintela
    return ret;
367 e44d26c8 Juan Quintela
}
368 e44d26c8 Juan Quintela
369 dd2df737 Juan Quintela
static void migration_bitmap_sync(void)
370 dd2df737 Juan Quintela
{
371 c6bf8e0e Juan Quintela
    RAMBlock *block;
372 c6bf8e0e Juan Quintela
    ram_addr_t addr;
373 c6bf8e0e Juan Quintela
    uint64_t num_dirty_pages_init = migration_dirty_pages;
374 8d017193 Juan Quintela
    MigrationState *s = migrate_get_current();
375 8d017193 Juan Quintela
    static int64_t start_time;
376 8d017193 Juan Quintela
    static int64_t num_dirty_pages_period;
377 8d017193 Juan Quintela
    int64_t end_time;
378 8d017193 Juan Quintela
379 8d017193 Juan Quintela
    if (!start_time) {
380 8d017193 Juan Quintela
        start_time = qemu_get_clock_ms(rt_clock);
381 8d017193 Juan Quintela
    }
382 3c12193d Juan Quintela
383 3c12193d Juan Quintela
    trace_migration_bitmap_sync_start();
384 dd2df737 Juan Quintela
    memory_global_sync_dirty_bitmap(get_system_memory());
385 c6bf8e0e Juan Quintela
386 a3161038 Paolo Bonzini
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
387 c6bf8e0e Juan Quintela
        for (addr = 0; addr < block->length; addr += TARGET_PAGE_SIZE) {
388 c6bf8e0e Juan Quintela
            if (memory_region_get_dirty(block->mr, addr, TARGET_PAGE_SIZE,
389 c6bf8e0e Juan Quintela
                                        DIRTY_MEMORY_MIGRATION)) {
390 c6bf8e0e Juan Quintela
                migration_bitmap_set_dirty(block->mr, addr);
391 c6bf8e0e Juan Quintela
            }
392 c6bf8e0e Juan Quintela
        }
393 c6bf8e0e Juan Quintela
        memory_region_reset_dirty(block->mr, 0, block->length,
394 c6bf8e0e Juan Quintela
                                  DIRTY_MEMORY_MIGRATION);
395 c6bf8e0e Juan Quintela
    }
396 c6bf8e0e Juan Quintela
    trace_migration_bitmap_sync_end(migration_dirty_pages
397 3c12193d Juan Quintela
                                    - num_dirty_pages_init);
398 8d017193 Juan Quintela
    num_dirty_pages_period += migration_dirty_pages - num_dirty_pages_init;
399 8d017193 Juan Quintela
    end_time = qemu_get_clock_ms(rt_clock);
400 8d017193 Juan Quintela
401 8d017193 Juan Quintela
    /* more than 1 second = 1000 milliseconds */
402 8d017193 Juan Quintela
    if (end_time > start_time + 1000) {
403 8d017193 Juan Quintela
        s->dirty_pages_rate = num_dirty_pages_period * 1000
404 8d017193 Juan Quintela
            / (end_time - start_time);
405 8d017193 Juan Quintela
        start_time = end_time;
406 8d017193 Juan Quintela
        num_dirty_pages_period = 0;
407 8d017193 Juan Quintela
    }
408 dd2df737 Juan Quintela
}
409 dd2df737 Juan Quintela
410 6c779f22 Orit Wasserman
/*
411 6c779f22 Orit Wasserman
 * ram_save_block: Writes a page of memory to the stream f
412 6c779f22 Orit Wasserman
 *
413 6c779f22 Orit Wasserman
 * Returns:  0: if the page hasn't changed
414 6c779f22 Orit Wasserman
 *          -1: if there are no more dirty pages
415 6c779f22 Orit Wasserman
 *           n: the number of bytes written otherwise
416 6c779f22 Orit Wasserman
 */
417 6c779f22 Orit Wasserman
418 dd051c72 Juan Quintela
static int ram_save_block(QEMUFile *f, bool last_stage)
419 ad96090a Blue Swirl
{
420 e44359c3 Alex Williamson
    RAMBlock *block = last_block;
421 e44359c3 Alex Williamson
    ram_addr_t offset = last_offset;
422 6c779f22 Orit Wasserman
    int bytes_sent = -1;
423 71c510e2 Avi Kivity
    MemoryRegion *mr;
424 17ad9b35 Orit Wasserman
    ram_addr_t current_addr;
425 ad96090a Blue Swirl
426 e44359c3 Alex Williamson
    if (!block)
427 a3161038 Paolo Bonzini
        block = QTAILQ_FIRST(&ram_list.blocks);
428 e44359c3 Alex Williamson
429 e44359c3 Alex Williamson
    do {
430 71c510e2 Avi Kivity
        mr = block->mr;
431 69268cde Juan Quintela
        if (migration_bitmap_test_and_reset_dirty(mr, offset)) {
432 ad96090a Blue Swirl
            uint8_t *p;
433 a55bbe31 Alex Williamson
            int cont = (block == last_block) ? RAM_SAVE_FLAG_CONTINUE : 0;
434 ad96090a Blue Swirl
435 71c510e2 Avi Kivity
            p = memory_region_get_ram_ptr(mr) + offset;
436 ad96090a Blue Swirl
437 86003615 Paolo Bonzini
            if (is_dup_page(p)) {
438 004d4c10 Orit Wasserman
                acct_info.dup_pages++;
439 0c51f43d Orit Wasserman
                save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_COMPRESS);
440 ad96090a Blue Swirl
                qemu_put_byte(f, *p);
441 3fc250b4 Pierre Riteau
                bytes_sent = 1;
442 17ad9b35 Orit Wasserman
            } else if (migrate_use_xbzrle()) {
443 17ad9b35 Orit Wasserman
                current_addr = block->offset + offset;
444 17ad9b35 Orit Wasserman
                bytes_sent = save_xbzrle_page(f, p, current_addr, block,
445 dd051c72 Juan Quintela
                                              offset, cont, last_stage);
446 dd051c72 Juan Quintela
                if (!last_stage) {
447 dd051c72 Juan Quintela
                    p = get_cached_data(XBZRLE.cache, current_addr);
448 dd051c72 Juan Quintela
                }
449 17ad9b35 Orit Wasserman
            }
450 17ad9b35 Orit Wasserman
451 17ad9b35 Orit Wasserman
            /* the page was not sent yet (no XBZRLE, cache miss or overflow): send it in full */
452 17ad9b35 Orit Wasserman
            if (bytes_sent == -1) {
453 0c51f43d Orit Wasserman
                save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_PAGE);
454 ad96090a Blue Swirl
                qemu_put_buffer(f, p, TARGET_PAGE_SIZE);
455 3fc250b4 Pierre Riteau
                bytes_sent = TARGET_PAGE_SIZE;
456 004d4c10 Orit Wasserman
                acct_info.norm_pages++;
457 ad96090a Blue Swirl
            }
458 ad96090a Blue Swirl
459 17ad9b35 Orit Wasserman
            /* if page is unmodified, continue to the next */
460 17ad9b35 Orit Wasserman
            if (bytes_sent != 0) {
461 17ad9b35 Orit Wasserman
                break;
462 17ad9b35 Orit Wasserman
            }
463 ad96090a Blue Swirl
        }
464 e44359c3 Alex Williamson
465 e44359c3 Alex Williamson
        offset += TARGET_PAGE_SIZE;
466 e44359c3 Alex Williamson
        if (offset >= block->length) {
467 e44359c3 Alex Williamson
            offset = 0;
468 a3161038 Paolo Bonzini
            block = QTAILQ_NEXT(block, next);
469 e44359c3 Alex Williamson
            if (!block)
470 a3161038 Paolo Bonzini
                block = QTAILQ_FIRST(&ram_list.blocks);
471 e44359c3 Alex Williamson
        }
472 71c510e2 Avi Kivity
    } while (block != last_block || offset != last_offset);
473 e44359c3 Alex Williamson
474 e44359c3 Alex Williamson
    last_block = block;
475 e44359c3 Alex Williamson
    last_offset = offset;
476 ad96090a Blue Swirl
477 3fc250b4 Pierre Riteau
    return bytes_sent;
478 ad96090a Blue Swirl
}
479 ad96090a Blue Swirl
480 ad96090a Blue Swirl
static uint64_t bytes_transferred;
481 ad96090a Blue Swirl
482 ad96090a Blue Swirl
static ram_addr_t ram_save_remaining(void)
483 ad96090a Blue Swirl
{
484 c6bf8e0e Juan Quintela
    return migration_dirty_pages;
485 ad96090a Blue Swirl
}
486 ad96090a Blue Swirl
487 ad96090a Blue Swirl
uint64_t ram_bytes_remaining(void)
488 ad96090a Blue Swirl
{
489 ad96090a Blue Swirl
    return ram_save_remaining() * TARGET_PAGE_SIZE;
490 ad96090a Blue Swirl
}
491 ad96090a Blue Swirl
492 ad96090a Blue Swirl
uint64_t ram_bytes_transferred(void)
493 ad96090a Blue Swirl
{
494 ad96090a Blue Swirl
    return bytes_transferred;
495 ad96090a Blue Swirl
}
496 ad96090a Blue Swirl
497 ad96090a Blue Swirl
uint64_t ram_bytes_total(void)
498 ad96090a Blue Swirl
{
499 d17b5288 Alex Williamson
    RAMBlock *block;
500 d17b5288 Alex Williamson
    uint64_t total = 0;
501 d17b5288 Alex Williamson
502 a3161038 Paolo Bonzini
    QTAILQ_FOREACH(block, &ram_list.blocks, next)
503 d17b5288 Alex Williamson
        total += block->length;
504 d17b5288 Alex Williamson
505 d17b5288 Alex Williamson
    return total;
506 ad96090a Blue Swirl
}
507 ad96090a Blue Swirl
508 8e21cd32 Orit Wasserman
static void migration_end(void)
509 8e21cd32 Orit Wasserman
{
510 244eaa75 Paolo Bonzini
    if (migration_bitmap) {
511 244eaa75 Paolo Bonzini
        memory_global_dirty_log_stop();
512 244eaa75 Paolo Bonzini
        g_free(migration_bitmap);
513 244eaa75 Paolo Bonzini
        migration_bitmap = NULL;
514 244eaa75 Paolo Bonzini
    }
515 17ad9b35 Orit Wasserman
516 244eaa75 Paolo Bonzini
    if (XBZRLE.cache) {
517 17ad9b35 Orit Wasserman
        cache_fini(XBZRLE.cache);
518 17ad9b35 Orit Wasserman
        g_free(XBZRLE.cache);
519 17ad9b35 Orit Wasserman
        g_free(XBZRLE.encoded_buf);
520 17ad9b35 Orit Wasserman
        g_free(XBZRLE.current_buf);
521 17ad9b35 Orit Wasserman
        g_free(XBZRLE.decoded_buf);
522 17ad9b35 Orit Wasserman
        XBZRLE.cache = NULL;
523 17ad9b35 Orit Wasserman
    }
524 8e21cd32 Orit Wasserman
}
525 8e21cd32 Orit Wasserman
526 9b5bfab0 Juan Quintela
static void ram_migration_cancel(void *opaque)
527 9b5bfab0 Juan Quintela
{
528 9b5bfab0 Juan Quintela
    migration_end();
529 9b5bfab0 Juan Quintela
}
530 9b5bfab0 Juan Quintela
531 5a170775 Juan Quintela
static void reset_ram_globals(void)
532 5a170775 Juan Quintela
{
533 5a170775 Juan Quintela
    last_block = NULL;
534 5a170775 Juan Quintela
    last_offset = 0;
535 f798b07f Umesh Deshpande
    last_version = ram_list.version;
536 5a170775 Juan Quintela
}
537 5a170775 Juan Quintela
538 4508bd9e Juan Quintela
#define MAX_WAIT 50 /* ms, half buffered_file limit */
539 4508bd9e Juan Quintela
540 d1315aac Juan Quintela
static int ram_save_setup(QEMUFile *f, void *opaque)
541 ad96090a Blue Swirl
{
542 d1315aac Juan Quintela
    RAMBlock *block;
543 c6bf8e0e Juan Quintela
    int64_t ram_pages = last_ram_offset() >> TARGET_PAGE_BITS;
544 c6bf8e0e Juan Quintela
545 c6bf8e0e Juan Quintela
    migration_bitmap = bitmap_new(ram_pages);
546 7ec81e56 David Gibson
    bitmap_set(migration_bitmap, 0, ram_pages);
547 c6bf8e0e Juan Quintela
    migration_dirty_pages = ram_pages;
548 ad96090a Blue Swirl
549 b2a8658e Umesh Deshpande
    qemu_mutex_lock_ramlist();
550 d1315aac Juan Quintela
    bytes_transferred = 0;
551 5a170775 Juan Quintela
    reset_ram_globals();
552 ad96090a Blue Swirl
553 17ad9b35 Orit Wasserman
    if (migrate_use_xbzrle()) {
554 17ad9b35 Orit Wasserman
        XBZRLE.cache = cache_init(migrate_xbzrle_cache_size() /
555 17ad9b35 Orit Wasserman
                                  TARGET_PAGE_SIZE,
556 17ad9b35 Orit Wasserman
                                  TARGET_PAGE_SIZE);
557 17ad9b35 Orit Wasserman
        if (!XBZRLE.cache) {
558 17ad9b35 Orit Wasserman
            DPRINTF("Error creating cache\n");
559 17ad9b35 Orit Wasserman
            return -1;
560 17ad9b35 Orit Wasserman
        }
561 17ad9b35 Orit Wasserman
        XBZRLE.encoded_buf = g_malloc0(TARGET_PAGE_SIZE);
562 17ad9b35 Orit Wasserman
        XBZRLE.current_buf = g_malloc(TARGET_PAGE_SIZE);
563 004d4c10 Orit Wasserman
        acct_clear();
564 17ad9b35 Orit Wasserman
    }
565 17ad9b35 Orit Wasserman
566 d1315aac Juan Quintela
    memory_global_dirty_log_start();
567 c6bf8e0e Juan Quintela
    migration_bitmap_sync();
568 ad96090a Blue Swirl
569 d1315aac Juan Quintela
    qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);
570 97ab12d4 Alex Williamson
571 a3161038 Paolo Bonzini
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
572 d1315aac Juan Quintela
        qemu_put_byte(f, strlen(block->idstr));
573 d1315aac Juan Quintela
        qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
574 d1315aac Juan Quintela
        qemu_put_be64(f, block->length);
575 ad96090a Blue Swirl
    }
576 ad96090a Blue Swirl
577 b2a8658e Umesh Deshpande
    qemu_mutex_unlock_ramlist();
578 d1315aac Juan Quintela
    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
579 d1315aac Juan Quintela
580 d1315aac Juan Quintela
    return 0;
581 d1315aac Juan Quintela
}
582 d1315aac Juan Quintela
583 16310a3c Juan Quintela
static int ram_save_iterate(QEMUFile *f, void *opaque)
584 d1315aac Juan Quintela
{
585 d1315aac Juan Quintela
    uint64_t bytes_transferred_last;
586 d1315aac Juan Quintela
    double bwidth = 0;
587 d1315aac Juan Quintela
    int ret;
588 d1315aac Juan Quintela
    int i;
589 c00012f6 Juan Quintela
    uint64_t expected_downtime;
590 2c52ddf1 Juan Quintela
    MigrationState *s = migrate_get_current();
591 d1315aac Juan Quintela
592 b2a8658e Umesh Deshpande
    qemu_mutex_lock_ramlist();
593 b2a8658e Umesh Deshpande
594 f798b07f Umesh Deshpande
    if (ram_list.version != last_version) {
595 f798b07f Umesh Deshpande
        reset_ram_globals();
596 f798b07f Umesh Deshpande
    }
597 f798b07f Umesh Deshpande
598 ad96090a Blue Swirl
    bytes_transferred_last = bytes_transferred;
599 ad96090a Blue Swirl
    bwidth = qemu_get_clock_ns(rt_clock);
600 ad96090a Blue Swirl
601 4508bd9e Juan Quintela
    i = 0;
602 2975725f Juan Quintela
    while ((ret = qemu_file_rate_limit(f)) == 0) {
603 3fc250b4 Pierre Riteau
        int bytes_sent;
604 ad96090a Blue Swirl
605 dd051c72 Juan Quintela
        bytes_sent = ram_save_block(f, false);
606 6c779f22 Orit Wasserman
        /* no more blocks to send */
607 6c779f22 Orit Wasserman
        if (bytes_sent < 0) {
608 ad96090a Blue Swirl
            break;
609 ad96090a Blue Swirl
        }
610 6c779f22 Orit Wasserman
        bytes_transferred += bytes_sent;
611 004d4c10 Orit Wasserman
        acct_info.iterations++;
612 4508bd9e Juan Quintela
        /* we want to check in the 1st loop, just in case it was the 1st time
613 4508bd9e Juan Quintela
           and we had to sync the dirty bitmap.
614 4508bd9e Juan Quintela
           qemu_get_clock_ns() is a bit expensive, so we only check every few
615 4508bd9e Juan Quintela
           iterations
616 4508bd9e Juan Quintela
        */
617 4508bd9e Juan Quintela
        if ((i & 63) == 0) {
618 4508bd9e Juan Quintela
            uint64_t t1 = (qemu_get_clock_ns(rt_clock) - bwidth) / 1000000;
619 4508bd9e Juan Quintela
            if (t1 > MAX_WAIT) {
620 ef37a699 Igor Mitsyanko
                DPRINTF("big wait: %" PRIu64 " milliseconds, %d iterations\n",
621 4508bd9e Juan Quintela
                        t1, i);
622 4508bd9e Juan Quintela
                break;
623 4508bd9e Juan Quintela
            }
624 4508bd9e Juan Quintela
        }
625 4508bd9e Juan Quintela
        i++;
626 ad96090a Blue Swirl
    }
627 ad96090a Blue Swirl
628 2975725f Juan Quintela
    if (ret < 0) {
629 2975725f Juan Quintela
        return ret;
630 2975725f Juan Quintela
    }
631 2975725f Juan Quintela
632 ad96090a Blue Swirl
    bwidth = qemu_get_clock_ns(rt_clock) - bwidth;
633 ad96090a Blue Swirl
    bwidth = (bytes_transferred - bytes_transferred_last) / bwidth;
634 ad96090a Blue Swirl
635 c00012f6 Juan Quintela
    /* if we haven't transferred anything this round, force
636 c00012f6 Juan Quintela
     * expected_downtime to a very high value, but without
637 c00012f6 Juan Quintela
     * crashing */
638 ad96090a Blue Swirl
    if (bwidth == 0) {
639 ad96090a Blue Swirl
        bwidth = 0.000001;
640 ad96090a Blue Swirl
    }
641 ad96090a Blue Swirl
642 b2a8658e Umesh Deshpande
    qemu_mutex_unlock_ramlist();
643 16310a3c Juan Quintela
    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
644 16310a3c Juan Quintela
645 c00012f6 Juan Quintela
    expected_downtime = ram_save_remaining() * TARGET_PAGE_SIZE / bwidth;
646 c00012f6 Juan Quintela
    DPRINTF("ram_save_live: expected(%" PRIu64 ") <= max(" PRIu64 ")?\n",
647 c00012f6 Juan Quintela
            expected_downtime, migrate_max_downtime());
648 16310a3c Juan Quintela
649 c00012f6 Juan Quintela
    if (expected_downtime <= migrate_max_downtime()) {
650 dd2df737 Juan Quintela
        migration_bitmap_sync();
651 c00012f6 Juan Quintela
        expected_downtime = ram_save_remaining() * TARGET_PAGE_SIZE / bwidth;
652 2c52ddf1 Juan Quintela
        s->expected_downtime = expected_downtime / 1000000; /* ns -> ms */
653 00d94f3f Juan Quintela
654 c00012f6 Juan Quintela
        return expected_downtime <= migrate_max_downtime();
655 00d94f3f Juan Quintela
    }
656 00d94f3f Juan Quintela
    return 0;
657 16310a3c Juan Quintela
}
658 16310a3c Juan Quintela
659 16310a3c Juan Quintela
static int ram_save_complete(QEMUFile *f, void *opaque)
660 16310a3c Juan Quintela
{
661 dd2df737 Juan Quintela
    migration_bitmap_sync();
662 16310a3c Juan Quintela
663 b2a8658e Umesh Deshpande
    qemu_mutex_lock_ramlist();
664 b2a8658e Umesh Deshpande
665 ad96090a Blue Swirl
    /* try transferring iterative blocks of memory */
666 3a697f69 Orit Wasserman
667 16310a3c Juan Quintela
    /* flush all remaining blocks regardless of rate limiting */
668 6c779f22 Orit Wasserman
    while (true) {
669 3fc250b4 Pierre Riteau
        int bytes_sent;
670 3fc250b4 Pierre Riteau
671 dd051c72 Juan Quintela
        bytes_sent = ram_save_block(f, true);
672 6c779f22 Orit Wasserman
        /* no more blocks to send */
673 6c779f22 Orit Wasserman
        if (bytes_sent < 0) {
674 6c779f22 Orit Wasserman
            break;
675 ad96090a Blue Swirl
        }
676 16310a3c Juan Quintela
        bytes_transferred += bytes_sent;
677 ad96090a Blue Swirl
    }
678 244eaa75 Paolo Bonzini
    migration_end();
679 ad96090a Blue Swirl
680 b2a8658e Umesh Deshpande
    qemu_mutex_unlock_ramlist();
681 ad96090a Blue Swirl
    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
682 ad96090a Blue Swirl
683 5b3c9638 Juan Quintela
    return 0;
684 ad96090a Blue Swirl
}
685 ad96090a Blue Swirl
686 17ad9b35 Orit Wasserman
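/* Load one XBZRLE-compressed page: read the subheader (flag byte and be16
   encoded length), validate it, then decode the delta in place on top of
   the current contents of the page at 'host'. */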
static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
687 17ad9b35 Orit Wasserman
{
688 17ad9b35 Orit Wasserman
    int ret, rc = 0;
689 17ad9b35 Orit Wasserman
    unsigned int xh_len;
690 17ad9b35 Orit Wasserman
    int xh_flags;
691 17ad9b35 Orit Wasserman
692 17ad9b35 Orit Wasserman
    if (!XBZRLE.decoded_buf) {
693 17ad9b35 Orit Wasserman
        XBZRLE.decoded_buf = g_malloc(TARGET_PAGE_SIZE);
694 17ad9b35 Orit Wasserman
    }
695 17ad9b35 Orit Wasserman
696 17ad9b35 Orit Wasserman
    /* extract RLE header */
697 17ad9b35 Orit Wasserman
    xh_flags = qemu_get_byte(f);
698 17ad9b35 Orit Wasserman
    xh_len = qemu_get_be16(f);
699 17ad9b35 Orit Wasserman
700 17ad9b35 Orit Wasserman
    if (xh_flags != ENCODING_FLAG_XBZRLE) {
701 17ad9b35 Orit Wasserman
        fprintf(stderr, "Failed to load XBZRLE page - wrong compression!\n");
702 17ad9b35 Orit Wasserman
        return -1;
703 17ad9b35 Orit Wasserman
    }
704 17ad9b35 Orit Wasserman
705 17ad9b35 Orit Wasserman
    if (xh_len > TARGET_PAGE_SIZE) {
706 17ad9b35 Orit Wasserman
        fprintf(stderr, "Failed to load XBZRLE page - len overflow!\n");
707 17ad9b35 Orit Wasserman
        return -1;
708 17ad9b35 Orit Wasserman
    }
709 17ad9b35 Orit Wasserman
    /* load data and decode */
710 17ad9b35 Orit Wasserman
    qemu_get_buffer(f, XBZRLE.decoded_buf, xh_len);
711 17ad9b35 Orit Wasserman
712 17ad9b35 Orit Wasserman
    /* decode RLE */
713 17ad9b35 Orit Wasserman
    ret = xbzrle_decode_buffer(XBZRLE.decoded_buf, xh_len, host,
714 17ad9b35 Orit Wasserman
                               TARGET_PAGE_SIZE);
715 17ad9b35 Orit Wasserman
    if (ret == -1) {
716 17ad9b35 Orit Wasserman
        fprintf(stderr, "Failed to load XBZRLE page - decode error!\n");
717 17ad9b35 Orit Wasserman
        rc = -1;
718 17ad9b35 Orit Wasserman
    } else if (ret > TARGET_PAGE_SIZE) {
719 17ad9b35 Orit Wasserman
        fprintf(stderr, "Failed to load XBZRLE page - size %d exceeds %d!\n",
720 17ad9b35 Orit Wasserman
                ret, TARGET_PAGE_SIZE);
721 17ad9b35 Orit Wasserman
        abort();
722 17ad9b35 Orit Wasserman
    }
723 17ad9b35 Orit Wasserman
724 17ad9b35 Orit Wasserman
    return rc;
725 17ad9b35 Orit Wasserman
}
726 17ad9b35 Orit Wasserman
727 a55bbe31 Alex Williamson
static inline void *host_from_stream_offset(QEMUFile *f,
728 a55bbe31 Alex Williamson
                                            ram_addr_t offset,
729 a55bbe31 Alex Williamson
                                            int flags)
730 a55bbe31 Alex Williamson
{
731 a55bbe31 Alex Williamson
    static RAMBlock *block = NULL;
732 a55bbe31 Alex Williamson
    char id[256];
733 a55bbe31 Alex Williamson
    uint8_t len;
734 a55bbe31 Alex Williamson
735 a55bbe31 Alex Williamson
    if (flags & RAM_SAVE_FLAG_CONTINUE) {
736 a55bbe31 Alex Williamson
        if (!block) {
737 a55bbe31 Alex Williamson
            fprintf(stderr, "Ack, bad migration stream!\n");
738 a55bbe31 Alex Williamson
            return NULL;
739 a55bbe31 Alex Williamson
        }
740 a55bbe31 Alex Williamson
741 dc94a7ed Avi Kivity
        return memory_region_get_ram_ptr(block->mr) + offset;
742 a55bbe31 Alex Williamson
    }
743 a55bbe31 Alex Williamson
744 a55bbe31 Alex Williamson
    len = qemu_get_byte(f);
745 a55bbe31 Alex Williamson
    qemu_get_buffer(f, (uint8_t *)id, len);
746 a55bbe31 Alex Williamson
    id[len] = 0;
747 a55bbe31 Alex Williamson
748 a3161038 Paolo Bonzini
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
749 a55bbe31 Alex Williamson
        if (!strncmp(id, block->idstr, sizeof(id)))
750 dc94a7ed Avi Kivity
            return memory_region_get_ram_ptr(block->mr) + offset;
751 a55bbe31 Alex Williamson
    }
752 a55bbe31 Alex Williamson
753 a55bbe31 Alex Williamson
    fprintf(stderr, "Can't find block %s!\n", id);
754 a55bbe31 Alex Williamson
    return NULL;
755 a55bbe31 Alex Williamson
}
756 a55bbe31 Alex Williamson
757 7908c78d Juan Quintela
static int ram_load(QEMUFile *f, void *opaque, int version_id)
758 ad96090a Blue Swirl
{
759 ad96090a Blue Swirl
    ram_addr_t addr;
760 3a697f69 Orit Wasserman
    int flags, ret = 0;
761 42802d47 Juan Quintela
    int error;
762 3a697f69 Orit Wasserman
    static uint64_t seq_iter;
763 3a697f69 Orit Wasserman
764 3a697f69 Orit Wasserman
    seq_iter++;
765 ad96090a Blue Swirl
766 f09f2189 Avi Kivity
    if (version_id < 4 || version_id > 4) {
767 ad96090a Blue Swirl
        return -EINVAL;
768 ad96090a Blue Swirl
    }
769 ad96090a Blue Swirl
770 ad96090a Blue Swirl
    do {
771 ad96090a Blue Swirl
        addr = qemu_get_be64(f);
772 ad96090a Blue Swirl
773 ad96090a Blue Swirl
        flags = addr & ~TARGET_PAGE_MASK;
774 ad96090a Blue Swirl
        addr &= TARGET_PAGE_MASK;
775 ad96090a Blue Swirl
776 ad96090a Blue Swirl
        if (flags & RAM_SAVE_FLAG_MEM_SIZE) {
777 f09f2189 Avi Kivity
            if (version_id == 4) {
778 97ab12d4 Alex Williamson
                /* Synchronize RAM block list */
779 97ab12d4 Alex Williamson
                char id[256];
780 97ab12d4 Alex Williamson
                ram_addr_t length;
781 97ab12d4 Alex Williamson
                ram_addr_t total_ram_bytes = addr;
782 97ab12d4 Alex Williamson
783 97ab12d4 Alex Williamson
                while (total_ram_bytes) {
784 97ab12d4 Alex Williamson
                    RAMBlock *block;
785 97ab12d4 Alex Williamson
                    uint8_t len;
786 97ab12d4 Alex Williamson
787 97ab12d4 Alex Williamson
                    len = qemu_get_byte(f);
788 97ab12d4 Alex Williamson
                    qemu_get_buffer(f, (uint8_t *)id, len);
789 97ab12d4 Alex Williamson
                    id[len] = 0;
790 97ab12d4 Alex Williamson
                    length = qemu_get_be64(f);
791 97ab12d4 Alex Williamson
792 a3161038 Paolo Bonzini
                    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
793 97ab12d4 Alex Williamson
                        if (!strncmp(id, block->idstr, sizeof(id))) {
794 3a697f69 Orit Wasserman
                            if (block->length != length) {
795 3a697f69 Orit Wasserman
                                ret =  -EINVAL;
796 3a697f69 Orit Wasserman
                                goto done;
797 3a697f69 Orit Wasserman
                            }
798 97ab12d4 Alex Williamson
                            break;
799 97ab12d4 Alex Williamson
                        }
800 97ab12d4 Alex Williamson
                    }
801 97ab12d4 Alex Williamson
802 97ab12d4 Alex Williamson
                    if (!block) {
803 fb787f81 Alex Williamson
                        fprintf(stderr, "Unknown ramblock \"%s\", cannot "
804 fb787f81 Alex Williamson
                                "accept migration\n", id);
805 3a697f69 Orit Wasserman
                        ret = -EINVAL;
806 3a697f69 Orit Wasserman
                        goto done;
807 97ab12d4 Alex Williamson
                    }
808 97ab12d4 Alex Williamson
809 97ab12d4 Alex Williamson
                    total_ram_bytes -= length;
810 97ab12d4 Alex Williamson
                }
811 ad96090a Blue Swirl
            }
812 ad96090a Blue Swirl
        }
813 ad96090a Blue Swirl
814 ad96090a Blue Swirl
        if (flags & RAM_SAVE_FLAG_COMPRESS) {
815 97ab12d4 Alex Williamson
            void *host;
816 97ab12d4 Alex Williamson
            uint8_t ch;
817 97ab12d4 Alex Williamson
818 f09f2189 Avi Kivity
            host = host_from_stream_offset(f, addr, flags);
819 492fb99c Michael S. Tsirkin
            if (!host) {
820 492fb99c Michael S. Tsirkin
                return -EINVAL;
821 492fb99c Michael S. Tsirkin
            }
822 97ab12d4 Alex Williamson
823 97ab12d4 Alex Williamson
            ch = qemu_get_byte(f);
824 97ab12d4 Alex Williamson
            memset(host, ch, TARGET_PAGE_SIZE);
825 ad96090a Blue Swirl
#ifndef _WIN32
826 ad96090a Blue Swirl
            if (ch == 0 &&
827 45e6cee4 David Gibson
                (!kvm_enabled() || kvm_has_sync_mmu()) &&
828 45e6cee4 David Gibson
                getpagesize() <= TARGET_PAGE_SIZE) {
829 e78815a5 Andreas Färber
                qemu_madvise(host, TARGET_PAGE_SIZE, QEMU_MADV_DONTNEED);
830 ad96090a Blue Swirl
            }
831 ad96090a Blue Swirl
#endif
832 ad96090a Blue Swirl
        } else if (flags & RAM_SAVE_FLAG_PAGE) {
833 97ab12d4 Alex Williamson
            void *host;
834 97ab12d4 Alex Williamson
835 f09f2189 Avi Kivity
            host = host_from_stream_offset(f, addr, flags);
836 0ff1f9f5 Orit Wasserman
            if (!host) {
837 0ff1f9f5 Orit Wasserman
                return -EINVAL;
838 0ff1f9f5 Orit Wasserman
            }
839 97ab12d4 Alex Williamson
840 97ab12d4 Alex Williamson
            qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
841 17ad9b35 Orit Wasserman
        } else if (flags & RAM_SAVE_FLAG_XBZRLE) {
842 17ad9b35 Orit Wasserman
            if (!migrate_use_xbzrle()) {
843 17ad9b35 Orit Wasserman
                return -EINVAL;
844 17ad9b35 Orit Wasserman
            }
845 17ad9b35 Orit Wasserman
            void *host = host_from_stream_offset(f, addr, flags);
846 17ad9b35 Orit Wasserman
            if (!host) {
847 17ad9b35 Orit Wasserman
                return -EINVAL;
848 17ad9b35 Orit Wasserman
            }
849 17ad9b35 Orit Wasserman
850 17ad9b35 Orit Wasserman
            if (load_xbzrle(f, addr, host) < 0) {
851 17ad9b35 Orit Wasserman
                ret = -EINVAL;
852 17ad9b35 Orit Wasserman
                goto done;
853 17ad9b35 Orit Wasserman
            }
854 ad96090a Blue Swirl
        }
855 42802d47 Juan Quintela
        error = qemu_file_get_error(f);
856 42802d47 Juan Quintela
        if (error) {
857 3a697f69 Orit Wasserman
            ret = error;
858 3a697f69 Orit Wasserman
            goto done;
859 ad96090a Blue Swirl
        }
860 ad96090a Blue Swirl
    } while (!(flags & RAM_SAVE_FLAG_EOS));
861 ad96090a Blue Swirl
862 3a697f69 Orit Wasserman
done:
863 ef37a699 Igor Mitsyanko
    DPRINTF("Completed load of VM with exit code %d seq iteration "
864 ef37a699 Igor Mitsyanko
            "%" PRIu64 "\n", ret, seq_iter);
865 3a697f69 Orit Wasserman
    return ret;
866 ad96090a Blue Swirl
}
867 ad96090a Blue Swirl
868 7908c78d Juan Quintela
SaveVMHandlers savevm_ram_handlers = {
869 d1315aac Juan Quintela
    .save_live_setup = ram_save_setup,
870 16310a3c Juan Quintela
    .save_live_iterate = ram_save_iterate,
871 16310a3c Juan Quintela
    .save_live_complete = ram_save_complete,
872 7908c78d Juan Quintela
    .load_state = ram_load,
873 9b5bfab0 Juan Quintela
    .cancel = ram_migration_cancel,
874 7908c78d Juan Quintela
};
875 7908c78d Juan Quintela
876 ad96090a Blue Swirl
#ifdef HAS_AUDIO
877 0dfa5ef9 Isaku Yamahata
struct soundhw {
878 0dfa5ef9 Isaku Yamahata
    const char *name;
879 0dfa5ef9 Isaku Yamahata
    const char *descr;
880 0dfa5ef9 Isaku Yamahata
    int enabled;
881 0dfa5ef9 Isaku Yamahata
    int isa;
882 0dfa5ef9 Isaku Yamahata
    union {
883 4a0f031d Hervé Poussineau
        int (*init_isa) (ISABus *bus);
884 0dfa5ef9 Isaku Yamahata
        int (*init_pci) (PCIBus *bus);
885 0dfa5ef9 Isaku Yamahata
    } init;
886 0dfa5ef9 Isaku Yamahata
};
887 0dfa5ef9 Isaku Yamahata
888 0dfa5ef9 Isaku Yamahata
static struct soundhw soundhw[] = {
889 ad96090a Blue Swirl
#ifdef HAS_AUDIO_CHOICE
890 da12872a Hervé Poussineau
#ifdef CONFIG_PCSPK
891 ad96090a Blue Swirl
    {
892 ad96090a Blue Swirl
        "pcspk",
893 ad96090a Blue Swirl
        "PC speaker",
894 ad96090a Blue Swirl
        0,
895 ad96090a Blue Swirl
        1,
896 ad96090a Blue Swirl
        { .init_isa = pcspk_audio_init }
897 ad96090a Blue Swirl
    },
898 ad96090a Blue Swirl
#endif
899 ad96090a Blue Swirl
900 ad96090a Blue Swirl
#ifdef CONFIG_SB16
901 ad96090a Blue Swirl
    {
902 ad96090a Blue Swirl
        "sb16",
903 ad96090a Blue Swirl
        "Creative Sound Blaster 16",
904 ad96090a Blue Swirl
        0,
905 ad96090a Blue Swirl
        1,
906 ad96090a Blue Swirl
        { .init_isa = SB16_init }
907 ad96090a Blue Swirl
    },
908 ad96090a Blue Swirl
#endif
909 ad96090a Blue Swirl
910 ad96090a Blue Swirl
#ifdef CONFIG_CS4231A
911 ad96090a Blue Swirl
    {
912 ad96090a Blue Swirl
        "cs4231a",
913 ad96090a Blue Swirl
        "CS4231A",
914 ad96090a Blue Swirl
        0,
915 ad96090a Blue Swirl
        1,
916 ad96090a Blue Swirl
        { .init_isa = cs4231a_init }
917 ad96090a Blue Swirl
    },
918 ad96090a Blue Swirl
#endif
919 ad96090a Blue Swirl
920 ad96090a Blue Swirl
#ifdef CONFIG_ADLIB
921 ad96090a Blue Swirl
    {
922 ad96090a Blue Swirl
        "adlib",
923 ad96090a Blue Swirl
#ifdef HAS_YMF262
924 ad96090a Blue Swirl
        "Yamaha YMF262 (OPL3)",
925 ad96090a Blue Swirl
#else
926 ad96090a Blue Swirl
        "Yamaha YM3812 (OPL2)",
927 ad96090a Blue Swirl
#endif
928 ad96090a Blue Swirl
        0,
929 ad96090a Blue Swirl
        1,
930 ad96090a Blue Swirl
        { .init_isa = Adlib_init }
931 ad96090a Blue Swirl
    },
932 ad96090a Blue Swirl
#endif
933 ad96090a Blue Swirl
934 ad96090a Blue Swirl
#ifdef CONFIG_GUS
935 ad96090a Blue Swirl
    {
936 ad96090a Blue Swirl
        "gus",
937 ad96090a Blue Swirl
        "Gravis Ultrasound GF1",
938 ad96090a Blue Swirl
        0,
939 ad96090a Blue Swirl
        1,
940 ad96090a Blue Swirl
        { .init_isa = GUS_init }
941 ad96090a Blue Swirl
    },
942 ad96090a Blue Swirl
#endif
943 ad96090a Blue Swirl
944 ad96090a Blue Swirl
#ifdef CONFIG_AC97
945 ad96090a Blue Swirl
    {
946 ad96090a Blue Swirl
        "ac97",
947 ad96090a Blue Swirl
        "Intel 82801AA AC97 Audio",
948 ad96090a Blue Swirl
        0,
949 ad96090a Blue Swirl
        0,
950 ad96090a Blue Swirl
        { .init_pci = ac97_init }
951 ad96090a Blue Swirl
    },
952 ad96090a Blue Swirl
#endif
953 ad96090a Blue Swirl
954 ad96090a Blue Swirl
#ifdef CONFIG_ES1370
955 ad96090a Blue Swirl
    {
956 ad96090a Blue Swirl
        "es1370",
957 ad96090a Blue Swirl
        "ENSONIQ AudioPCI ES1370",
958 ad96090a Blue Swirl
        0,
959 ad96090a Blue Swirl
        0,
960 ad96090a Blue Swirl
        { .init_pci = es1370_init }
961 ad96090a Blue Swirl
    },
962 ad96090a Blue Swirl
#endif
963 ad96090a Blue Swirl
964 d61a4ce8 Gerd Hoffmann
#ifdef CONFIG_HDA
965 d61a4ce8 Gerd Hoffmann
    {
966 d61a4ce8 Gerd Hoffmann
        "hda",
967 d61a4ce8 Gerd Hoffmann
        "Intel HD Audio",
968 d61a4ce8 Gerd Hoffmann
        0,
969 d61a4ce8 Gerd Hoffmann
        0,
970 d61a4ce8 Gerd Hoffmann
        { .init_pci = intel_hda_and_codec_init }
971 d61a4ce8 Gerd Hoffmann
    },
972 d61a4ce8 Gerd Hoffmann
#endif
973 d61a4ce8 Gerd Hoffmann
974 ad96090a Blue Swirl
#endif /* HAS_AUDIO_CHOICE */
975 ad96090a Blue Swirl
976 ad96090a Blue Swirl
    { NULL, NULL, 0, 0, { NULL } }
977 ad96090a Blue Swirl
};
978 ad96090a Blue Swirl
979 ad96090a Blue Swirl
void select_soundhw(const char *optarg)
980 ad96090a Blue Swirl
{
981 ad96090a Blue Swirl
    struct soundhw *c;
982 ad96090a Blue Swirl
983 c8057f95 Peter Maydell
    if (is_help_option(optarg)) {
984 ad96090a Blue Swirl
    show_valid_cards:
985 ad96090a Blue Swirl
986 55d4fd3c Peter Maydell
#ifdef HAS_AUDIO_CHOICE
987 ad96090a Blue Swirl
        printf("Valid sound card names (comma separated):\n");
988 ad96090a Blue Swirl
        for (c = soundhw; c->name; ++c) {
989 ad96090a Blue Swirl
            printf ("%-11s %s\n", c->name, c->descr);
990 ad96090a Blue Swirl
        }
991 ad96090a Blue Swirl
        printf("\n-soundhw all will enable all of the above\n");
992 55d4fd3c Peter Maydell
#else
993 55d4fd3c Peter Maydell
        printf("Machine has no user-selectable audio hardware "
994 55d4fd3c Peter Maydell
               "(it may or may not have always-present audio hardware).\n");
995 55d4fd3c Peter Maydell
#endif
996 c8057f95 Peter Maydell
        exit(!is_help_option(optarg));
997 ad96090a Blue Swirl
    }
998 ad96090a Blue Swirl
    else {
999 ad96090a Blue Swirl
        size_t l;
1000 ad96090a Blue Swirl
        const char *p;
1001 ad96090a Blue Swirl
        char *e;
1002 ad96090a Blue Swirl
        int bad_card = 0;
1003 ad96090a Blue Swirl
1004 ad96090a Blue Swirl
        if (!strcmp(optarg, "all")) {
1005 ad96090a Blue Swirl
            for (c = soundhw; c->name; ++c) {
1006 ad96090a Blue Swirl
                c->enabled = 1;
1007 ad96090a Blue Swirl
            }
1008 ad96090a Blue Swirl
            return;
1009 ad96090a Blue Swirl
        }
1010 ad96090a Blue Swirl
1011 ad96090a Blue Swirl
        p = optarg;
1012 ad96090a Blue Swirl
        while (*p) {
1013 ad96090a Blue Swirl
            e = strchr(p, ',');
1014 ad96090a Blue Swirl
            l = !e ? strlen(p) : (size_t) (e - p);
1015 ad96090a Blue Swirl
1016 ad96090a Blue Swirl
            for (c = soundhw; c->name; ++c) {
1017 ad96090a Blue Swirl
                if (!strncmp(c->name, p, l) && !c->name[l]) {
1018 ad96090a Blue Swirl
                    c->enabled = 1;
1019 ad96090a Blue Swirl
                    break;
1020 ad96090a Blue Swirl
                }
1021 ad96090a Blue Swirl
            }
1022 ad96090a Blue Swirl
1023 ad96090a Blue Swirl
            if (!c->name) {
1024 ad96090a Blue Swirl
                if (l > 80) {
1025 ad96090a Blue Swirl
                    fprintf(stderr,
1026 ad96090a Blue Swirl
                            "Unknown sound card name (too big to show)\n");
1027 ad96090a Blue Swirl
                }
1028 ad96090a Blue Swirl
                else {
1029 ad96090a Blue Swirl
                    fprintf(stderr, "Unknown sound card name `%.*s'\n",
1030 ad96090a Blue Swirl
                            (int) l, p);
1031 ad96090a Blue Swirl
                }
1032 ad96090a Blue Swirl
                bad_card = 1;
1033 ad96090a Blue Swirl
            }
1034 ad96090a Blue Swirl
            p += l + (e != NULL);
1035 ad96090a Blue Swirl
        }
1036 ad96090a Blue Swirl
1037 ad96090a Blue Swirl
        if (bad_card) {
1038 ad96090a Blue Swirl
            goto show_valid_cards;
1039 ad96090a Blue Swirl
        }
1040 ad96090a Blue Swirl
    }
1041 ad96090a Blue Swirl
}
1042 0dfa5ef9 Isaku Yamahata
1043 4a0f031d Hervé Poussineau
void audio_init(ISABus *isa_bus, PCIBus *pci_bus)
1044 0dfa5ef9 Isaku Yamahata
{
1045 0dfa5ef9 Isaku Yamahata
    struct soundhw *c;
1046 0dfa5ef9 Isaku Yamahata
1047 0dfa5ef9 Isaku Yamahata
    for (c = soundhw; c->name; ++c) {
1048 0dfa5ef9 Isaku Yamahata
        if (c->enabled) {
1049 0dfa5ef9 Isaku Yamahata
            if (c->isa) {
1050 4a0f031d Hervé Poussineau
                if (isa_bus) {
1051 4a0f031d Hervé Poussineau
                    c->init.init_isa(isa_bus);
1052 0dfa5ef9 Isaku Yamahata
                }
1053 0dfa5ef9 Isaku Yamahata
            } else {
1054 0dfa5ef9 Isaku Yamahata
                if (pci_bus) {
1055 0dfa5ef9 Isaku Yamahata
                    c->init.init_pci(pci_bus);
1056 0dfa5ef9 Isaku Yamahata
                }
1057 0dfa5ef9 Isaku Yamahata
            }
1058 0dfa5ef9 Isaku Yamahata
        }
1059 0dfa5ef9 Isaku Yamahata
    }
1060 0dfa5ef9 Isaku Yamahata
}
1061 ad96090a Blue Swirl
#else
1062 ad96090a Blue Swirl
void select_soundhw(const char *optarg)
1063 ad96090a Blue Swirl
{
1064 ad96090a Blue Swirl
}
1065 4a0f031d Hervé Poussineau
void audio_init(ISABus *isa_bus, PCIBus *pci_bus)
1066 0dfa5ef9 Isaku Yamahata
{
1067 0dfa5ef9 Isaku Yamahata
}
1068 ad96090a Blue Swirl
#endif
1069 ad96090a Blue Swirl
1070 ad96090a Blue Swirl
int qemu_uuid_parse(const char *str, uint8_t *uuid)
1071 ad96090a Blue Swirl
{
1072 ad96090a Blue Swirl
    int ret;
1073 ad96090a Blue Swirl
1074 ad96090a Blue Swirl
    if (strlen(str) != 36) {
1075 ad96090a Blue Swirl
        return -1;
1076 ad96090a Blue Swirl
    }
1077 ad96090a Blue Swirl
1078 ad96090a Blue Swirl
    ret = sscanf(str, UUID_FMT, &uuid[0], &uuid[1], &uuid[2], &uuid[3],
1079 ad96090a Blue Swirl
                 &uuid[4], &uuid[5], &uuid[6], &uuid[7], &uuid[8], &uuid[9],
1080 ad96090a Blue Swirl
                 &uuid[10], &uuid[11], &uuid[12], &uuid[13], &uuid[14],
1081 ad96090a Blue Swirl
                 &uuid[15]);
1082 ad96090a Blue Swirl
1083 ad96090a Blue Swirl
    if (ret != 16) {
1084 ad96090a Blue Swirl
        return -1;
1085 ad96090a Blue Swirl
    }
1086 ad96090a Blue Swirl
#ifdef TARGET_I386
1087 ad96090a Blue Swirl
    smbios_add_field(1, offsetof(struct smbios_type_1, uuid), 16, uuid);
1088 ad96090a Blue Swirl
#endif
1089 ad96090a Blue Swirl
    return 0;
1090 ad96090a Blue Swirl
}
1091 ad96090a Blue Swirl
1092 ad96090a Blue Swirl
void do_acpitable_option(const char *optarg)
1093 ad96090a Blue Swirl
{
1094 ad96090a Blue Swirl
#ifdef TARGET_I386
1095 ad96090a Blue Swirl
    if (acpi_table_add(optarg) < 0) {
1096 ad96090a Blue Swirl
        fprintf(stderr, "Wrong acpi table provided\n");
1097 ad96090a Blue Swirl
        exit(1);
1098 ad96090a Blue Swirl
    }
1099 ad96090a Blue Swirl
#endif
1100 ad96090a Blue Swirl
}
1101 ad96090a Blue Swirl
1102 ad96090a Blue Swirl
void do_smbios_option(const char *optarg)
1103 ad96090a Blue Swirl
{
1104 ad96090a Blue Swirl
#ifdef TARGET_I386
1105 ad96090a Blue Swirl
    if (smbios_entry_add(optarg) < 0) {
1106 ad96090a Blue Swirl
        fprintf(stderr, "Wrong smbios provided\n");
1107 ad96090a Blue Swirl
        exit(1);
1108 ad96090a Blue Swirl
    }
1109 ad96090a Blue Swirl
#endif
1110 ad96090a Blue Swirl
}
1111 ad96090a Blue Swirl
1112 ad96090a Blue Swirl
void cpudef_init(void)
1113 ad96090a Blue Swirl
{
1114 ad96090a Blue Swirl
#if defined(cpudef_setup)
1115 ad96090a Blue Swirl
    cpudef_setup(); /* parse cpu definitions in target config file */
1116 ad96090a Blue Swirl
#endif
1117 ad96090a Blue Swirl
}
1118 ad96090a Blue Swirl
1119 ad96090a Blue Swirl
int audio_available(void)
1120 ad96090a Blue Swirl
{
1121 ad96090a Blue Swirl
#ifdef HAS_AUDIO
1122 ad96090a Blue Swirl
    return 1;
1123 ad96090a Blue Swirl
#else
1124 ad96090a Blue Swirl
    return 0;
1125 ad96090a Blue Swirl
#endif
1126 ad96090a Blue Swirl
}
1127 ad96090a Blue Swirl
1128 303d4e86 Anthony PERARD
int tcg_available(void)
1129 303d4e86 Anthony PERARD
{
1130 303d4e86 Anthony PERARD
    return 1;
1131 303d4e86 Anthony PERARD
}
1132 303d4e86 Anthony PERARD
1133 ad96090a Blue Swirl
int kvm_available(void)
1134 ad96090a Blue Swirl
{
1135 ad96090a Blue Swirl
#ifdef CONFIG_KVM
1136 ad96090a Blue Swirl
    return 1;
1137 ad96090a Blue Swirl
#else
1138 ad96090a Blue Swirl
    return 0;
1139 ad96090a Blue Swirl
#endif
1140 ad96090a Blue Swirl
}
1141 ad96090a Blue Swirl
1142 ad96090a Blue Swirl
int xen_available(void)
1143 ad96090a Blue Swirl
{
1144 ad96090a Blue Swirl
#ifdef CONFIG_XEN
1145 ad96090a Blue Swirl
    return 1;
1146 ad96090a Blue Swirl
#else
1147 ad96090a Blue Swirl
    return 0;
1148 ad96090a Blue Swirl
#endif
1149 ad96090a Blue Swirl
}
1150 99afc91d Daniel P. Berrange
1151 99afc91d Daniel P. Berrange
1152 99afc91d Daniel P. Berrange
TargetInfo *qmp_query_target(Error **errp)
1153 99afc91d Daniel P. Berrange
{
1154 99afc91d Daniel P. Berrange
    TargetInfo *info = g_malloc0(sizeof(*info));
1155 99afc91d Daniel P. Berrange
1156 99afc91d Daniel P. Berrange
    info->arch = TARGET_TYPE;
1157 99afc91d Daniel P. Berrange
1158 99afc91d Daniel P. Berrange
    return info;
1159 99afc91d Daniel P. Berrange
}