/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include <stdint.h>
#include <stdarg.h>
#include <stdlib.h>
#ifndef _WIN32
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include "config.h"
#include "monitor/monitor.h"
#include "sysemu/sysemu.h"
#include "qemu/bitops.h"
#include "qemu/bitmap.h"
#include "sysemu/arch_init.h"
#include "audio/audio.h"
#include "hw/i386/pc.h"
#include "hw/pci/pci.h"
#include "hw/audio/audio.h"
#include "sysemu/kvm.h"
#include "migration/migration.h"
#include "hw/i386/smbios.h"
#include "exec/address-spaces.h"
#include "hw/audio/pcspk.h"
#include "migration/page_cache.h"
#include "qemu/config-file.h"
#include "qmp-commands.h"
#include "trace.h"
#include "exec/cpu-all.h"
#include "hw/acpi/acpi.h"

#ifdef DEBUG_ARCH_INIT
#define DPRINTF(fmt, ...) \
    do { fprintf(stdout, "arch_init: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#ifdef TARGET_SPARC
int graphic_width = 1024;
int graphic_height = 768;
int graphic_depth = 8;
#else
int graphic_width = 800;
int graphic_height = 600;
int graphic_depth = 32;
#endif


#if defined(TARGET_ALPHA)
#define QEMU_ARCH QEMU_ARCH_ALPHA
#elif defined(TARGET_ARM)
#define QEMU_ARCH QEMU_ARCH_ARM
#elif defined(TARGET_CRIS)
#define QEMU_ARCH QEMU_ARCH_CRIS
#elif defined(TARGET_I386)
#define QEMU_ARCH QEMU_ARCH_I386
#elif defined(TARGET_M68K)
#define QEMU_ARCH QEMU_ARCH_M68K
#elif defined(TARGET_LM32)
#define QEMU_ARCH QEMU_ARCH_LM32
#elif defined(TARGET_MICROBLAZE)
#define QEMU_ARCH QEMU_ARCH_MICROBLAZE
#elif defined(TARGET_MIPS)
#define QEMU_ARCH QEMU_ARCH_MIPS
#elif defined(TARGET_MOXIE)
#define QEMU_ARCH QEMU_ARCH_MOXIE
#elif defined(TARGET_OPENRISC)
#define QEMU_ARCH QEMU_ARCH_OPENRISC
#elif defined(TARGET_PPC)
#define QEMU_ARCH QEMU_ARCH_PPC
#elif defined(TARGET_S390X)
#define QEMU_ARCH QEMU_ARCH_S390X
#elif defined(TARGET_SH4)
#define QEMU_ARCH QEMU_ARCH_SH4
#elif defined(TARGET_SPARC)
#define QEMU_ARCH QEMU_ARCH_SPARC
#elif defined(TARGET_XTENSA)
#define QEMU_ARCH QEMU_ARCH_XTENSA
#elif defined(TARGET_UNICORE32)
#define QEMU_ARCH QEMU_ARCH_UNICORE32
#endif

const uint32_t arch_type = QEMU_ARCH;
static bool mig_throttle_on;
static int dirty_rate_high_cnt;
static void check_guest_throttling(void);

/***********************************************************/
/* ram save/restore */

#define RAM_SAVE_FLAG_FULL     0x01 /* Obsolete, not used anymore */
#define RAM_SAVE_FLAG_COMPRESS 0x02
#define RAM_SAVE_FLAG_MEM_SIZE 0x04
#define RAM_SAVE_FLAG_PAGE     0x08
#define RAM_SAVE_FLAG_EOS      0x10
#define RAM_SAVE_FLAG_CONTINUE 0x20
#define RAM_SAVE_FLAG_XBZRLE   0x40
/* 0x80 is reserved in migration.h; start with 0x100 for the next flag */

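/*
 * Each page record in the migration stream starts with a be64 word: the
 * page's offset within its RAMBlock, with the RAM_SAVE_FLAG_* bits OR-ed
 * into the low bits.  Unless RAM_SAVE_FLAG_CONTINUE is set, a one-byte
 * idstr length and the RAMBlock idstr follow.  The payload then depends
 * on the flag: COMPRESS carries a single fill byte, PAGE carries
 * TARGET_PAGE_SIZE bytes of raw data, and XBZRLE carries a subheader plus
 * the encoded buffer (see save_block_hdr(), ram_save_block() and
 * ram_load() below).
 */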

static struct defconfig_file {
    const char *filename;
    /* Indicates it is a user config file (disabled by -no-user-config) */
    bool userconfig;
} default_config_files[] = {
    { CONFIG_QEMU_CONFDIR "/qemu.conf",                   true },
    { CONFIG_QEMU_CONFDIR "/target-" TARGET_NAME ".conf", true },
    { NULL }, /* end of list */
};

int qemu_read_default_config_files(bool userconfig)
{
    int ret;
    struct defconfig_file *f;

    for (f = default_config_files; f->filename; f++) {
        if (!userconfig && f->userconfig) {
            continue;
        }
        ret = qemu_read_config_file(f->filename);
        if (ret < 0 && ret != -ENOENT) {
            return ret;
        }
    }

    return 0;
}

static inline bool is_zero_page(uint8_t *p)
{
    return buffer_find_nonzero_offset(p, TARGET_PAGE_SIZE) ==
        TARGET_PAGE_SIZE;
}

/* struct containing the XBZRLE cache and the static buffers
   used for the compression */
static struct {
    /* buffer used for XBZRLE encoding */
    uint8_t *encoded_buf;
    /* buffer for storing page content */
    uint8_t *current_buf;
    /* buffer used for XBZRLE decoding */
    uint8_t *decoded_buf;
    /* Cache for XBZRLE */
    PageCache *cache;
} XBZRLE = {
    .encoded_buf = NULL,
    .current_buf = NULL,
    .decoded_buf = NULL,
    .cache = NULL,
};

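/* Resize the XBZRLE page cache.  cache_resize() works in whole pages, so
 * the requested byte size is converted down and the result converted back
 * up; while no cache exists yet, report the pow2floor() of the requested
 * size instead. */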
int64_t xbzrle_cache_resize(int64_t new_size)
{
    if (XBZRLE.cache != NULL) {
        return cache_resize(XBZRLE.cache, new_size / TARGET_PAGE_SIZE) *
            TARGET_PAGE_SIZE;
    }
    return pow2floor(new_size);
}

/* accounting for migration statistics */
typedef struct AccountingInfo {
    uint64_t dup_pages;
    uint64_t skipped_pages;
    uint64_t norm_pages;
    uint64_t iterations;
    uint64_t xbzrle_bytes;
    uint64_t xbzrle_pages;
    uint64_t xbzrle_cache_miss;
    uint64_t xbzrle_overflows;
} AccountingInfo;

static AccountingInfo acct_info;

static void acct_clear(void)
{
    memset(&acct_info, 0, sizeof(acct_info));
}

uint64_t dup_mig_bytes_transferred(void)
{
    return acct_info.dup_pages * TARGET_PAGE_SIZE;
}

uint64_t dup_mig_pages_transferred(void)
{
    return acct_info.dup_pages;
}

uint64_t skipped_mig_bytes_transferred(void)
{
    return acct_info.skipped_pages * TARGET_PAGE_SIZE;
}

uint64_t skipped_mig_pages_transferred(void)
{
    return acct_info.skipped_pages;
}

uint64_t norm_mig_bytes_transferred(void)
{
    return acct_info.norm_pages * TARGET_PAGE_SIZE;
}

uint64_t norm_mig_pages_transferred(void)
{
    return acct_info.norm_pages;
}

uint64_t xbzrle_mig_bytes_transferred(void)
{
    return acct_info.xbzrle_bytes;
}

uint64_t xbzrle_mig_pages_transferred(void)
{
    return acct_info.xbzrle_pages;
}

uint64_t xbzrle_mig_pages_cache_miss(void)
{
    return acct_info.xbzrle_cache_miss;
}

uint64_t xbzrle_mig_pages_overflow(void)
{
    return acct_info.xbzrle_overflows;
}

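/* Write the header common to every page record: the be64 offset|flags
 * word, plus the RAMBlock idstr unless the page continues the block that
 * was sent last (cont == RAM_SAVE_FLAG_CONTINUE).  Returns the number of
 * header bytes written. */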
static size_t save_block_hdr(QEMUFile *f, RAMBlock *block, ram_addr_t offset,
                             int cont, int flag)
{
    size_t size;

    qemu_put_be64(f, offset | cont | flag);
    size = 8;

    if (!cont) {
        qemu_put_byte(f, strlen(block->idstr));
        qemu_put_buffer(f, (uint8_t *)block->idstr,
                        strlen(block->idstr));
        size += 1 + strlen(block->idstr);
    }
    return size;
}

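/*
 * save_xbzrle_page() below tries to send a page as a delta against the
 * copy held in the XBZRLE page cache.  Its return value mirrors what
 * ram_save_block() expects:
 *   -1  the page was not sent here (cache miss or encoding overflow);
 *       the caller falls back to a full RAM_SAVE_FLAG_PAGE copy
 *    0  the page is identical to the cached copy, nothing to send
 *   >0  number of bytes written for the encoded page
 */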
#define ENCODING_FLAG_XBZRLE 0x1

static int save_xbzrle_page(QEMUFile *f, uint8_t *current_data,
                            ram_addr_t current_addr, RAMBlock *block,
                            ram_addr_t offset, int cont, bool last_stage)
{
    int encoded_len = 0, bytes_sent = -1;
    uint8_t *prev_cached_page;

    if (!cache_is_cached(XBZRLE.cache, current_addr)) {
        if (!last_stage) {
            cache_insert(XBZRLE.cache, current_addr, current_data);
        }
        acct_info.xbzrle_cache_miss++;
        return -1;
    }

    prev_cached_page = get_cached_data(XBZRLE.cache, current_addr);

    /* save current buffer into memory */
    memcpy(XBZRLE.current_buf, current_data, TARGET_PAGE_SIZE);

    /* XBZRLE encoding (if there is no overflow) */
    encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
                                       TARGET_PAGE_SIZE, XBZRLE.encoded_buf,
                                       TARGET_PAGE_SIZE);
    if (encoded_len == 0) {
        DPRINTF("Skipping unmodified page\n");
        return 0;
    } else if (encoded_len == -1) {
        DPRINTF("Overflow\n");
        acct_info.xbzrle_overflows++;
        /* update data in the cache */
        memcpy(prev_cached_page, current_data, TARGET_PAGE_SIZE);
        return -1;
    }

    /* update the cache so later rounds diff against what was just sent */
    if (!last_stage) {
        memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
    }

    /* Send XBZRLE based compressed page */
    bytes_sent = save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_XBZRLE);
    qemu_put_byte(f, ENCODING_FLAG_XBZRLE);
    qemu_put_be16(f, encoded_len);
    qemu_put_buffer(f, XBZRLE.encoded_buf, encoded_len);
    bytes_sent += encoded_len + 1 + 2;
    acct_info.xbzrle_pages++;
    acct_info.xbzrle_bytes += bytes_sent;

    return bytes_sent;
}


/* This is the last block that we have visited searching for dirty pages */
static RAMBlock *last_seen_block;
/* This is the last block from where we have sent data */
static RAMBlock *last_sent_block;
static ram_addr_t last_offset;
static unsigned long *migration_bitmap;
static uint64_t migration_dirty_pages;
static uint32_t last_version;
static bool ram_bulk_stage;

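/* migration_bitmap keeps one bit per target page of guest RAM; a set bit
 * means the page is dirty and still has to be sent.  ram_save_setup()
 * starts with every bit set, and while ram_bulk_stage is true the finder
 * below skips the bitmap search, since on the first pass through RAM the
 * next page is dirty by construction. */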
static inline
ram_addr_t migration_bitmap_find_and_reset_dirty(MemoryRegion *mr,
                                                 ram_addr_t start)
{
    unsigned long base = mr->ram_addr >> TARGET_PAGE_BITS;
    unsigned long nr = base + (start >> TARGET_PAGE_BITS);
    unsigned long size = base + (int128_get64(mr->size) >> TARGET_PAGE_BITS);

    unsigned long next;

    if (ram_bulk_stage && nr > base) {
        next = nr + 1;
    } else {
        next = find_next_bit(migration_bitmap, size, nr);
    }

    if (next < size) {
        clear_bit(next, migration_bitmap);
        migration_dirty_pages--;
    }
    return (next - base) << TARGET_PAGE_BITS;
}

static inline bool migration_bitmap_set_dirty(MemoryRegion *mr,
                                              ram_addr_t offset)
{
    bool ret;
    int nr = (mr->ram_addr + offset) >> TARGET_PAGE_BITS;

    ret = test_and_set_bit(nr, migration_bitmap);

    if (!ret) {
        migration_dirty_pages++;
    }
    return ret;
}

/* Needs iothread lock! */

static void migration_bitmap_sync(void)
{
    RAMBlock *block;
    ram_addr_t addr;
    uint64_t num_dirty_pages_init = migration_dirty_pages;
    MigrationState *s = migrate_get_current();
    static int64_t start_time;
    static int64_t bytes_xfer_prev;
    static int64_t num_dirty_pages_period;
    int64_t end_time;
    int64_t bytes_xfer_now;

    if (!bytes_xfer_prev) {
        bytes_xfer_prev = ram_bytes_transferred();
    }

    if (!start_time) {
        start_time = qemu_get_clock_ms(rt_clock);
    }

    trace_migration_bitmap_sync_start();
    address_space_sync_dirty_bitmap(&address_space_memory);

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        for (addr = 0; addr < block->length; addr += TARGET_PAGE_SIZE) {
            if (memory_region_test_and_clear_dirty(block->mr,
                                                   addr, TARGET_PAGE_SIZE,
                                                   DIRTY_MEMORY_MIGRATION)) {
                migration_bitmap_set_dirty(block->mr, addr);
            }
        }
    }
    trace_migration_bitmap_sync_end(migration_dirty_pages
                                    - num_dirty_pages_init);
    num_dirty_pages_period += migration_dirty_pages - num_dirty_pages_init;
    end_time = qemu_get_clock_ms(rt_clock);

    /* more than 1 second = 1000 milliseconds */
    if (end_time > start_time + 1000) {
        if (migrate_auto_converge()) {
            /* The following detection logic can be refined later. For now:
               check whether the bytes dirtied in this period are 50% more
               than the approximate number of bytes transferred since the
               last time we were in this routine. If that happens >N times
               (for now N == 4), turn on the throttle-down logic. */
            bytes_xfer_now = ram_bytes_transferred();
            if (s->dirty_pages_rate &&
               (num_dirty_pages_period * TARGET_PAGE_SIZE >
                   (bytes_xfer_now - bytes_xfer_prev)/2) &&
               (dirty_rate_high_cnt++ > 4)) {
                    trace_migration_throttle();
                    mig_throttle_on = true;
                    dirty_rate_high_cnt = 0;
             }
             bytes_xfer_prev = bytes_xfer_now;
        } else {
             mig_throttle_on = false;
        }
        s->dirty_pages_rate = num_dirty_pages_period * 1000
            / (end_time - start_time);
        s->dirty_bytes_rate = s->dirty_pages_rate * TARGET_PAGE_SIZE;
        start_time = end_time;
        num_dirty_pages_period = 0;
    }
}

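/* Auto-converge throttling: when the sync above keeps finding the guest
 * dirtying memory faster than it is being transferred (several one-second
 * periods in a row), mig_throttle_on is set; check_guest_throttling(),
 * declared at the top of this file and defined later (beyond this
 * excerpt), then slows the guest down so the transfer can converge. */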
/*
 * ram_save_block: Writes a page of memory to the stream f
 *
 * Returns:  The number of bytes written.
 *           0 means no dirty pages
 */

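/* The search resumes where the previous call left off (last_seen_block /
 * last_offset) and treats the block list as a ring: completing a full
 * round without sending anything means no dirty pages remain.  Pages from
 * the most recently sent block carry RAM_SAVE_FLAG_CONTINUE so the idstr
 * does not need to be repeated. */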
static int ram_save_block(QEMUFile *f, bool last_stage)
{
    RAMBlock *block = last_seen_block;
    ram_addr_t offset = last_offset;
    bool complete_round = false;
    int bytes_sent = 0;
    MemoryRegion *mr;
    ram_addr_t current_addr;

    if (!block)
        block = QTAILQ_FIRST(&ram_list.blocks);

    while (true) {
        mr = block->mr;
        offset = migration_bitmap_find_and_reset_dirty(mr, offset);
        if (complete_round && block == last_seen_block &&
            offset >= last_offset) {
            break;
        }
        if (offset >= block->length) {
            offset = 0;
            block = QTAILQ_NEXT(block, next);
            if (!block) {
                block = QTAILQ_FIRST(&ram_list.blocks);
                complete_round = true;
                ram_bulk_stage = false;
            }
        } else {
            int ret;
            uint8_t *p;
            int cont = (block == last_sent_block) ?
                RAM_SAVE_FLAG_CONTINUE : 0;

            p = memory_region_get_ram_ptr(mr) + offset;

            /* When in doubt, send the page as a normal page */
            bytes_sent = -1;
            ret = ram_control_save_page(f, block->offset,
                               offset, TARGET_PAGE_SIZE, &bytes_sent);

            if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
                if (ret != RAM_SAVE_CONTROL_DELAYED) {
                    if (bytes_sent > 0) {
                        acct_info.norm_pages++;
                    } else if (bytes_sent == 0) {
                        acct_info.dup_pages++;
                    }
                }
            } else if (is_zero_page(p)) {
                acct_info.dup_pages++;
                bytes_sent = save_block_hdr(f, block, offset, cont,
                                            RAM_SAVE_FLAG_COMPRESS);
                qemu_put_byte(f, 0);
                bytes_sent++;
            } else if (!ram_bulk_stage && migrate_use_xbzrle()) {
                current_addr = block->offset + offset;
                bytes_sent = save_xbzrle_page(f, p, current_addr, block,
                                              offset, cont, last_stage);
                if (!last_stage) {
                    p = get_cached_data(XBZRLE.cache, current_addr);
                }
            }

            /* XBZRLE overflow or normal page */
            if (bytes_sent == -1) {
                bytes_sent = save_block_hdr(f, block, offset, cont,
                                            RAM_SAVE_FLAG_PAGE);
                qemu_put_buffer_async(f, p, TARGET_PAGE_SIZE);
                bytes_sent += TARGET_PAGE_SIZE;
                acct_info.norm_pages++;
            }

            /* if page is unmodified, continue to the next */
            if (bytes_sent > 0) {
                last_sent_block = block;
                break;
            }
        }
    }
    last_seen_block = block;
    last_offset = offset;

    return bytes_sent;
}

static uint64_t bytes_transferred;

void acct_update_position(QEMUFile *f, size_t size, bool zero)
{
    uint64_t pages = size / TARGET_PAGE_SIZE;
    if (zero) {
        acct_info.dup_pages += pages;
    } else {
        acct_info.norm_pages += pages;
        bytes_transferred += size;
        qemu_update_position(f, size);
    }
}

static ram_addr_t ram_save_remaining(void)
{
    return migration_dirty_pages;
}

uint64_t ram_bytes_remaining(void)
{
    return ram_save_remaining() * TARGET_PAGE_SIZE;
}

uint64_t ram_bytes_transferred(void)
{
    return bytes_transferred;
}

uint64_t ram_bytes_total(void)
{
    RAMBlock *block;
    uint64_t total = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next)
        total += block->length;

    return total;
}

static void migration_end(void)
{
    if (migration_bitmap) {
        memory_global_dirty_log_stop();
        g_free(migration_bitmap);
        migration_bitmap = NULL;
    }

    if (XBZRLE.cache) {
        cache_fini(XBZRLE.cache);
        g_free(XBZRLE.cache);
        g_free(XBZRLE.encoded_buf);
        g_free(XBZRLE.current_buf);
        g_free(XBZRLE.decoded_buf);
        XBZRLE.cache = NULL;
    }
}

static void ram_migration_cancel(void *opaque)
{
    migration_end();
}

static void reset_ram_globals(void)
{
    last_seen_block = NULL;
    last_sent_block = NULL;
    last_offset = 0;
    last_version = ram_list.version;
    ram_bulk_stage = true;
}

#define MAX_WAIT 50 /* ms, half buffered_file limit */

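/* Setup stage: no guest pages are transferred yet.  The stream begins
 * with the total RAM size (tagged RAM_SAVE_FLAG_MEM_SIZE) followed by the
 * idstr and length of every RAMBlock; ram_load() uses this table to check
 * that source and destination agree on the RAM layout before any page
 * data flows. */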
static int ram_save_setup(QEMUFile *f, void *opaque)
{
    RAMBlock *block;
    int64_t ram_pages = last_ram_offset() >> TARGET_PAGE_BITS;

    migration_bitmap = bitmap_new(ram_pages);
    bitmap_set(migration_bitmap, 0, ram_pages);
    migration_dirty_pages = ram_pages;
    mig_throttle_on = false;
    dirty_rate_high_cnt = 0;

    if (migrate_use_xbzrle()) {
        XBZRLE.cache = cache_init(migrate_xbzrle_cache_size() /
                                  TARGET_PAGE_SIZE,
                                  TARGET_PAGE_SIZE);
        if (!XBZRLE.cache) {
            DPRINTF("Error creating cache\n");
            return -1;
        }
        XBZRLE.encoded_buf = g_malloc0(TARGET_PAGE_SIZE);
        XBZRLE.current_buf = g_malloc(TARGET_PAGE_SIZE);
        acct_clear();
    }

    qemu_mutex_lock_iothread();
    qemu_mutex_lock_ramlist();
    bytes_transferred = 0;
    reset_ram_globals();

    memory_global_dirty_log_start();
    migration_bitmap_sync();
    qemu_mutex_unlock_iothread();

    qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        qemu_put_byte(f, strlen(block->idstr));
        qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
        qemu_put_be64(f, block->length);
    }

    qemu_mutex_unlock_ramlist();

    ram_control_before_iterate(f, RAM_CONTROL_SETUP);
    ram_control_after_iterate(f, RAM_CONTROL_SETUP);

    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);

    return 0;
}

static int ram_save_iterate(QEMUFile *f, void *opaque)
{
    int ret;
    int i;
    int64_t t0;
    int total_sent = 0;

    qemu_mutex_lock_ramlist();

    if (ram_list.version != last_version) {
        reset_ram_globals();
    }

    ram_control_before_iterate(f, RAM_CONTROL_ROUND);

    t0 = qemu_get_clock_ns(rt_clock);
    i = 0;
    while ((ret = qemu_file_rate_limit(f)) == 0) {
        int bytes_sent;

        bytes_sent = ram_save_block(f, false);
        /* no more blocks to send */
        if (bytes_sent == 0) {
            break;
        }
        total_sent += bytes_sent;
        acct_info.iterations++;
        check_guest_throttling();
        /* we want to check in the 1st loop, just in case it was the 1st time
           and we had to sync the dirty bitmap.
           qemu_get_clock_ns() is a bit expensive, so we only check every few
           iterations
        */
        if ((i & 63) == 0) {
            uint64_t t1 = (qemu_get_clock_ns(rt_clock) - t0) / 1000000;
            if (t1 > MAX_WAIT) {
                DPRINTF("big wait: %" PRIu64 " milliseconds, %d iterations\n",
                        t1, i);
                break;
            }
        }
        i++;
    }

    qemu_mutex_unlock_ramlist();

    /*
     * Must occur before EOS (or any QEMUFile operation)
     * because of RDMA protocol.
     */
    ram_control_after_iterate(f, RAM_CONTROL_ROUND);

    if (ret < 0) {
        bytes_transferred += total_sent;
        return ret;
    }

    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
    total_sent += 8;
    bytes_transferred += total_sent;

    return total_sent;
}

static int ram_save_complete(QEMUFile *f, void *opaque)
{
    qemu_mutex_lock_ramlist();
    migration_bitmap_sync();

    ram_control_before_iterate(f, RAM_CONTROL_FINISH);

    /* try transferring iterative blocks of memory */

    /* flush all remaining blocks regardless of rate limiting */
    while (true) {
        int bytes_sent;

        bytes_sent = ram_save_block(f, true);
        /* no more blocks to send */
        if (bytes_sent == 0) {
            break;
        }
        bytes_transferred += bytes_sent;
    }

    ram_control_after_iterate(f, RAM_CONTROL_FINISH);
    migration_end();

    qemu_mutex_unlock_ramlist();
    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);

    return 0;
}

static uint64_t ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size)
{
    uint64_t remaining_size;

    remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;

    if (remaining_size < max_size) {
        qemu_mutex_lock_iothread();
        migration_bitmap_sync();
        qemu_mutex_unlock_iothread();
        remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;
    }
    return remaining_size;
}

static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
{
    int ret, rc = 0;
    unsigned int xh_len;
    int xh_flags;

    if (!XBZRLE.decoded_buf) {
        XBZRLE.decoded_buf = g_malloc(TARGET_PAGE_SIZE);
    }

    /* extract RLE header */
    xh_flags = qemu_get_byte(f);
    xh_len = qemu_get_be16(f);

    if (xh_flags != ENCODING_FLAG_XBZRLE) {
        fprintf(stderr, "Failed to load XBZRLE page - wrong compression!\n");
        return -1;
    }

    if (xh_len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "Failed to load XBZRLE page - len overflow!\n");
        return -1;
    }
    /* load data and decode */
    qemu_get_buffer(f, XBZRLE.decoded_buf, xh_len);

    /* decode RLE */
    ret = xbzrle_decode_buffer(XBZRLE.decoded_buf, xh_len, host,
                               TARGET_PAGE_SIZE);
    if (ret == -1) {
        fprintf(stderr, "Failed to load XBZRLE page - decode error!\n");
        rc = -1;
    } else if (ret > TARGET_PAGE_SIZE) {
        fprintf(stderr, "Failed to load XBZRLE page - size %d exceeds %d!\n",
                ret, TARGET_PAGE_SIZE);
        abort();
    }

    return rc;
}

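/* Map a (RAMBlock idstr, offset) pair from the stream to a host pointer.
 * The matched block is kept in a static variable so that records flagged
 * RAM_SAVE_FLAG_CONTINUE can reuse it without carrying the idstr again. */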
static inline void *host_from_stream_offset(QEMUFile *f,
                                            ram_addr_t offset,
                                            int flags)
{
    static RAMBlock *block = NULL;
    char id[256];
    uint8_t len;

    if (flags & RAM_SAVE_FLAG_CONTINUE) {
        if (!block) {
            fprintf(stderr, "Ack, bad migration stream!\n");
            return NULL;
        }

        return memory_region_get_ram_ptr(block->mr) + offset;
    }

    len = qemu_get_byte(f);
    qemu_get_buffer(f, (uint8_t *)id, len);
    id[len] = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (!strncmp(id, block->idstr, sizeof(id)))
            return memory_region_get_ram_ptr(block->mr) + offset;
    }

    fprintf(stderr, "Can't find block %s!\n", id);
    return NULL;
}

/*
 * If a page (or a whole RDMA chunk) has been
 * determined to be zero, then zap it.
 */
void ram_handle_compressed(void *host, uint8_t ch, uint64_t size)
{
    if (ch != 0 || !is_zero_page(host)) {
        memset(host, ch, size);
#ifndef _WIN32
        if (ch == 0 &&
            (!kvm_enabled() || kvm_has_sync_mmu()) &&
            getpagesize() <= TARGET_PAGE_SIZE) {
            qemu_madvise(host, TARGET_PAGE_SIZE, QEMU_MADV_DONTNEED);
        }
#endif
    }
}

static int ram_load(QEMUFile *f, void *opaque, int version_id)
{
    ram_addr_t addr;
    int flags, ret = 0;
    int error;
    static uint64_t seq_iter;

    seq_iter++;

    if (version_id < 4 || version_id > 4) {
        return -EINVAL;
    }

    do {
        addr = qemu_get_be64(f);

        flags = addr & ~TARGET_PAGE_MASK;
        addr &= TARGET_PAGE_MASK;

        if (flags & RAM_SAVE_FLAG_MEM_SIZE) {
            if (version_id == 4) {
                /* Synchronize RAM block list */
                char id[256];
                ram_addr_t length;
                ram_addr_t total_ram_bytes = addr;

                while (total_ram_bytes) {
                    RAMBlock *block;
                    uint8_t len;

                    len = qemu_get_byte(f);
                    qemu_get_buffer(f, (uint8_t *)id, len);
                    id[len] = 0;
                    length = qemu_get_be64(f);

                    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
                        if (!strncmp(id, block->idstr, sizeof(id))) {
                            if (block->length != length) {
                                fprintf(stderr,
                                        "Length mismatch: %s: " RAM_ADDR_FMT
                                        " in != " RAM_ADDR_FMT "\n", id, length,
                                        block->length);
                                ret = -EINVAL;
                                goto done;
                            }
                            break;
                        }
                    }

                    if (!block) {
                        fprintf(stderr, "Unknown ramblock \"%s\", cannot "
                                "accept migration\n", id);
                        ret = -EINVAL;
                        goto done;
                    }

                    total_ram_bytes -= length;
                }
            }
        }

        if (flags & RAM_SAVE_FLAG_COMPRESS) {
            void *host;
            uint8_t ch;

            host = host_from_stream_offset(f, addr, flags);
            if (!host) {
                return -EINVAL;
            }

            ch = qemu_get_byte(f);
            ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);
        } else if (flags & RAM_SAVE_FLAG_PAGE) {
            void *host;

            host = host_from_stream_offset(f, addr, flags);
            if (!host) {
                return -EINVAL;
            }

            qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
        } else if (flags & RAM_SAVE_FLAG_XBZRLE) {
            void *host = host_from_stream_offset(f, addr, flags);
            if (!host) {
                return -EINVAL;
            }

            if (load_xbzrle(f, addr, host) < 0) {
                ret = -EINVAL;
                goto done;
            }
        } else if (flags & RAM_SAVE_FLAG_HOOK) {
            ram_control_load_hook(f, flags);
        }
        error = qemu_file_get_error(f);
        if (error) {
            ret = error;
            goto done;
        }
    } while (!(flags & RAM_SAVE_FLAG_EOS));

done:
    DPRINTF("Completed load of VM with exit code %d seq iteration "
            "%" PRIu64 "\n", ret, seq_iter);
    return ret;
}

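/* Glue between RAM migration and the generic savevm machinery: setup runs
 * once and announces the RAM layout, iterate is called repeatedly while
 * the guest keeps running, pending reports how much dirty memory remains,
 * and complete flushes whatever is left once the guest has been stopped. */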
SaveVMHandlers savevm_ram_handlers = {
    .save_live_setup = ram_save_setup,
    .save_live_iterate = ram_save_iterate,
    .save_live_complete = ram_save_complete,
    .save_live_pending = ram_save_pending,
    .load_state = ram_load,
    .cancel = ram_migration_cancel,
};

struct soundhw {
    const char *name;
    const char *descr;
    int enabled;
    int isa;
    union {
        int (*init_isa) (ISABus *bus);
        int (*init_pci) (PCIBus *bus);
    } init;
};

static struct soundhw soundhw[9];
static int soundhw_count;

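/* Sound hardware registers itself into the soundhw[] table at startup and
 * -soundhw enables entries by name.  A hypothetical ISA card would
 * register itself roughly like this (illustrative call only; the real
 * registrations live with the device models, e.g. under hw/audio/):
 *
 *     isa_register_soundhw("sb16", "Creative Sound Blaster 16", SB16_init);
 */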
void isa_register_soundhw(const char *name, const char *descr,
                          int (*init_isa)(ISABus *bus))
{
    assert(soundhw_count < ARRAY_SIZE(soundhw) - 1);
    soundhw[soundhw_count].name = name;
    soundhw[soundhw_count].descr = descr;
    soundhw[soundhw_count].isa = 1;
    soundhw[soundhw_count].init.init_isa = init_isa;
    soundhw_count++;
}

void pci_register_soundhw(const char *name, const char *descr,
                          int (*init_pci)(PCIBus *bus))
{
    assert(soundhw_count < ARRAY_SIZE(soundhw) - 1);
    soundhw[soundhw_count].name = name;
    soundhw[soundhw_count].descr = descr;
    soundhw[soundhw_count].isa = 0;
    soundhw[soundhw_count].init.init_pci = init_pci;
    soundhw_count++;
}

1010 ad96090a Blue Swirl
void select_soundhw(const char *optarg)
1011 ad96090a Blue Swirl
{
1012 ad96090a Blue Swirl
    struct soundhw *c;
1013 ad96090a Blue Swirl
1014 c8057f95 Peter Maydell
    if (is_help_option(optarg)) {
1015 ad96090a Blue Swirl
    show_valid_cards:
1016 ad96090a Blue Swirl
1017 36cd6f6f Paolo Bonzini
        if (soundhw_count) {
1018 36cd6f6f Paolo Bonzini
             printf("Valid sound card names (comma separated):\n");
1019 36cd6f6f Paolo Bonzini
             for (c = soundhw; c->name; ++c) {
1020 36cd6f6f Paolo Bonzini
                 printf ("%-11s %s\n", c->name, c->descr);
1021 36cd6f6f Paolo Bonzini
             }
1022 36cd6f6f Paolo Bonzini
             printf("\n-soundhw all will enable all of the above\n");
1023 36cd6f6f Paolo Bonzini
        } else {
1024 36cd6f6f Paolo Bonzini
             printf("Machine has no user-selectable audio hardware "
1025 36cd6f6f Paolo Bonzini
                    "(it may or may not have always-present audio hardware).\n");
1026 ad96090a Blue Swirl
        }
1027 c8057f95 Peter Maydell
        exit(!is_help_option(optarg));
1028 ad96090a Blue Swirl
    }
1029 ad96090a Blue Swirl
    else {
1030 ad96090a Blue Swirl
        size_t l;
1031 ad96090a Blue Swirl
        const char *p;
1032 ad96090a Blue Swirl
        char *e;
1033 ad96090a Blue Swirl
        int bad_card = 0;
1034 ad96090a Blue Swirl
1035 ad96090a Blue Swirl
        if (!strcmp(optarg, "all")) {
1036 ad96090a Blue Swirl
            for (c = soundhw; c->name; ++c) {
1037 ad96090a Blue Swirl
                c->enabled = 1;
1038 ad96090a Blue Swirl
            }
1039 ad96090a Blue Swirl
            return;
1040 ad96090a Blue Swirl
        }
1041 ad96090a Blue Swirl
1042 ad96090a Blue Swirl
        p = optarg;
1043 ad96090a Blue Swirl
        while (*p) {
1044 ad96090a Blue Swirl
            e = strchr(p, ',');
1045 ad96090a Blue Swirl
            l = !e ? strlen(p) : (size_t) (e - p);
1046 ad96090a Blue Swirl
1047 ad96090a Blue Swirl
            for (c = soundhw; c->name; ++c) {
1048 ad96090a Blue Swirl
                if (!strncmp(c->name, p, l) && !c->name[l]) {
1049 ad96090a Blue Swirl
                    c->enabled = 1;
1050 ad96090a Blue Swirl
                    break;
1051 ad96090a Blue Swirl
                }
1052 ad96090a Blue Swirl
            }
1053 ad96090a Blue Swirl
1054 ad96090a Blue Swirl
            if (!c->name) {
1055 ad96090a Blue Swirl
                if (l > 80) {
1056 ad96090a Blue Swirl
                    fprintf(stderr,
1057 ad96090a Blue Swirl
                            "Unknown sound card name (too big to show)\n");
1058 ad96090a Blue Swirl
                }
1059 ad96090a Blue Swirl
                else {
1060 ad96090a Blue Swirl
                    fprintf(stderr, "Unknown sound card name `%.*s'\n",
1061 ad96090a Blue Swirl
                            (int) l, p);
1062 ad96090a Blue Swirl
                }
1063 ad96090a Blue Swirl
                bad_card = 1;
1064 ad96090a Blue Swirl
            }
1065 ad96090a Blue Swirl
            p += l + (e != NULL);
1066 ad96090a Blue Swirl
        }
1067 ad96090a Blue Swirl
1068 ad96090a Blue Swirl
        if (bad_card) {
1069 ad96090a Blue Swirl
            goto show_valid_cards;
1070 ad96090a Blue Swirl
        }
1071 ad96090a Blue Swirl
    }
1072 ad96090a Blue Swirl
}
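
/*
 * Illustrative flow (card names vary by target): "-soundhw sb16,adlib"
 * reaches this file as select_soundhw("sb16,adlib"), which sets ->enabled
 * on the matching entries; audio_init() below then instantiates every
 * enabled card on its bus.  "-soundhw help" prints the table and exits.
 */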

void audio_init(void)
{
    struct soundhw *c;
    ISABus *isa_bus = (ISABus *) object_resolve_path_type("", TYPE_ISA_BUS, NULL);
    PCIBus *pci_bus = (PCIBus *) object_resolve_path_type("", TYPE_PCI_BUS, NULL);

    for (c = soundhw; c->name; ++c) {
        if (c->enabled) {
            if (c->isa) {
                if (!isa_bus) {
                    fprintf(stderr, "ISA bus not available for %s\n", c->name);
                    exit(1);
                }
                c->init.init_isa(isa_bus);
            } else {
                if (!pci_bus) {
                    fprintf(stderr, "PCI bus not available for %s\n", c->name);
                    exit(1);
                }
                c->init.init_pci(pci_bus);
            }
        }
    }
}

int qemu_uuid_parse(const char *str, uint8_t *uuid)
{
    int ret;

    if (strlen(str) != 36) {
        return -1;
    }

    ret = sscanf(str, UUID_FMT, &uuid[0], &uuid[1], &uuid[2], &uuid[3],
                 &uuid[4], &uuid[5], &uuid[6], &uuid[7], &uuid[8], &uuid[9],
                 &uuid[10], &uuid[11], &uuid[12], &uuid[13], &uuid[14],
                 &uuid[15]);

    if (ret != 16) {
        return -1;
    }
#ifdef TARGET_I386
    smbios_add_field(1, offsetof(struct smbios_type_1, uuid), uuid, 16);
#endif
    return 0;
}
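
/*
 * Example (illustrative): a well-formed argument is 36 characters in
 * 8-4-4-4-12 hex groups, matching UUID_FMT, e.g.
 *
 *     uint8_t uuid[16];
 *     qemu_uuid_parse("12345678-9abc-def0-1234-56789abcdef0", uuid);
 *
 * Anything else fails the length or sscanf check and returns -1.
 */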

void do_acpitable_option(const QemuOpts *opts)
{
#ifdef TARGET_I386
    Error *err = NULL;

    acpi_table_add(opts, &err);
    if (err) {
        fprintf(stderr, "Wrong acpi table provided: %s\n",
                error_get_pretty(err));
        error_free(err);
        exit(1);
    }
#endif
}
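
/*
 * Reached from the "-acpitable" command-line option; an illustrative
 * invocation (file name hypothetical) is:
 *
 *     -acpitable file=my_table.bin
 *
 * On non-x86 targets the option is accepted but has no effect here.
 */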

void do_smbios_option(const char *optarg)
{
#ifdef TARGET_I386
    if (smbios_entry_add(optarg) < 0) {
        exit(1);
    }
#endif
}
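
/*
 * Reached from the "-smbios" command-line option; illustrative invocations
 * (values hypothetical) are:
 *
 *     -smbios file=smbios_blob.bin
 *     -smbios type=1,manufacturer=Example,serial=1234
 */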

void cpudef_init(void)
{
#if defined(cpudef_setup)
    cpudef_setup(); /* parse cpu definitions in target config file */
#endif
}

int tcg_available(void)
{
    return 1;
}

int kvm_available(void)
{
#ifdef CONFIG_KVM
    return 1;
#else
    return 0;
#endif
}

int xen_available(void)
{
#ifdef CONFIG_XEN
    return 1;
#else
    return 0;
#endif
}

TargetInfo *qmp_query_target(Error **errp)
{
    TargetInfo *info = g_malloc0(sizeof(*info));

    info->arch = g_strdup(TARGET_NAME);

    return info;
}
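
/*
 * QMP example (illustrative; the returned arch depends on the binary):
 *
 *   -> { "execute": "query-target" }
 *   <- { "return": { "arch": "x86_64" } }
 */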

/* Stub function that gets run on the vcpu when it's brought out of the
 * VM to run inside qemu via async_run_on_cpu(). */
static void mig_sleep_cpu(void *opq)
{
    qemu_mutex_unlock_iothread();
    g_usleep(30*1000);
    qemu_mutex_lock_iothread();
}

/* To reduce the dirty rate, explicitly disallow the VCPUs from spending
 * much time in the VM.  The migration thread will try to catch up.
 * The workload will experience a performance drop.
 */
static void mig_throttle_cpu_down(CPUState *cpu, void *data)
{
    async_run_on_cpu(cpu, mig_sleep_cpu, NULL);
}

static void mig_throttle_guest_down(void)
{
    qemu_mutex_lock_iothread();
    qemu_for_each_cpu(mig_throttle_cpu_down, NULL);
    qemu_mutex_unlock_iothread();
}

static void check_guest_throttling(void)
{
    static int64_t t0;
    int64_t        t1;

    if (!mig_throttle_on) {
        return;
    }

    if (!t0) {
        t0 = qemu_get_clock_ns(rt_clock);
        return;
    }

    t1 = qemu_get_clock_ns(rt_clock);

    /* If it has been more than 40 ms since the last time the guest
     * was throttled then do it again.
     */
    if (40 < (t1 - t0) / 1000000) {
        mig_throttle_guest_down();
        t0 = t1;
    }
}
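
/*
 * Rough arithmetic (derived from the constants above): each throttling round
 * parks every vcpu for ~30 ms, and rounds fire no more than once per 40 ms,
 * so while mig_throttle_on is set the guest gets at most roughly 25% of
 * wall-clock CPU time, giving the migration thread a chance to catch up
 * with the dirty rate.
 */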