arch_init.c @ 34b5d2c6

/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include <stdint.h>
#include <stdarg.h>
#include <stdlib.h>
#ifndef _WIN32
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include "config.h"
#include "monitor/monitor.h"
#include "sysemu/sysemu.h"
#include "qemu/bitops.h"
#include "qemu/bitmap.h"
#include "sysemu/arch_init.h"
#include "audio/audio.h"
#include "hw/i386/pc.h"
#include "hw/pci/pci.h"
#include "hw/audio/audio.h"
#include "sysemu/kvm.h"
#include "migration/migration.h"
#include "hw/i386/smbios.h"
#include "exec/address-spaces.h"
#include "hw/audio/pcspk.h"
#include "migration/page_cache.h"
#include "qemu/config-file.h"
#include "qmp-commands.h"
#include "trace.h"
#include "exec/cpu-all.h"
#include "hw/acpi/acpi.h"

#ifdef DEBUG_ARCH_INIT
#define DPRINTF(fmt, ...) \
    do { fprintf(stdout, "arch_init: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#ifdef TARGET_SPARC
int graphic_width = 1024;
int graphic_height = 768;
int graphic_depth = 8;
#else
int graphic_width = 800;
int graphic_height = 600;
int graphic_depth = 32;
#endif


#if defined(TARGET_ALPHA)
#define QEMU_ARCH QEMU_ARCH_ALPHA
#elif defined(TARGET_ARM)
#define QEMU_ARCH QEMU_ARCH_ARM
#elif defined(TARGET_CRIS)
#define QEMU_ARCH QEMU_ARCH_CRIS
#elif defined(TARGET_I386)
#define QEMU_ARCH QEMU_ARCH_I386
#elif defined(TARGET_M68K)
#define QEMU_ARCH QEMU_ARCH_M68K
#elif defined(TARGET_LM32)
#define QEMU_ARCH QEMU_ARCH_LM32
#elif defined(TARGET_MICROBLAZE)
#define QEMU_ARCH QEMU_ARCH_MICROBLAZE
#elif defined(TARGET_MIPS)
#define QEMU_ARCH QEMU_ARCH_MIPS
#elif defined(TARGET_MOXIE)
#define QEMU_ARCH QEMU_ARCH_MOXIE
#elif defined(TARGET_OPENRISC)
#define QEMU_ARCH QEMU_ARCH_OPENRISC
#elif defined(TARGET_PPC)
#define QEMU_ARCH QEMU_ARCH_PPC
#elif defined(TARGET_S390X)
#define QEMU_ARCH QEMU_ARCH_S390X
#elif defined(TARGET_SH4)
#define QEMU_ARCH QEMU_ARCH_SH4
#elif defined(TARGET_SPARC)
#define QEMU_ARCH QEMU_ARCH_SPARC
#elif defined(TARGET_XTENSA)
#define QEMU_ARCH QEMU_ARCH_XTENSA
#elif defined(TARGET_UNICORE32)
#define QEMU_ARCH QEMU_ARCH_UNICORE32
#endif

const uint32_t arch_type = QEMU_ARCH;
static bool mig_throttle_on;
static int dirty_rate_high_cnt;
static void check_guest_throttling(void);

/***********************************************************/
/* ram save/restore */

#define RAM_SAVE_FLAG_FULL     0x01 /* Obsolete, not used anymore */
#define RAM_SAVE_FLAG_COMPRESS 0x02
#define RAM_SAVE_FLAG_MEM_SIZE 0x04
#define RAM_SAVE_FLAG_PAGE     0x08
#define RAM_SAVE_FLAG_EOS      0x10
#define RAM_SAVE_FLAG_CONTINUE 0x20
#define RAM_SAVE_FLAG_XBZRLE   0x40
/* 0x80 is reserved in migration.h; start with 0x100 next */
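/*
 * Illustrative note (not in the original source): these flags are OR'ed into
 * the low bits of the page address on the wire, which works because a
 * page-aligned offset leaves the TARGET_PAGE_BITS low bits free.  A minimal
 * decode sketch, mirroring what ram_load() does below:
 *
 *   uint64_t word   = qemu_get_be64(f);
 *   int flags       = word & ~TARGET_PAGE_MASK;  <- RAM_SAVE_FLAG_* bits
 *   ram_addr_t addr = word & TARGET_PAGE_MASK;   <- page-aligned address
 */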
static struct defconfig_file {
    const char *filename;
    /* Indicates it is a user config file (disabled by -no-user-config) */
    bool userconfig;
} default_config_files[] = {
    { CONFIG_QEMU_CONFDIR "/qemu.conf",                   true },
    { CONFIG_QEMU_CONFDIR "/target-" TARGET_NAME ".conf", true },
    { NULL }, /* end of list */
};


int qemu_read_default_config_files(bool userconfig)
{
    int ret;
    struct defconfig_file *f;

    for (f = default_config_files; f->filename; f++) {
        if (!userconfig && f->userconfig) {
            continue;
        }
        ret = qemu_read_config_file(f->filename);
        if (ret < 0 && ret != -ENOENT) {
            return ret;
        }
    }

    return 0;
}

static inline bool is_zero_page(uint8_t *p)
{
    return buffer_find_nonzero_offset(p, TARGET_PAGE_SIZE) ==
        TARGET_PAGE_SIZE;
}
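/*
 * Illustrative note (not in the original source): is_zero_page() is true iff
 * every byte of the page is zero -- buffer_find_nonzero_offset() only returns
 * TARGET_PAGE_SIZE when it scanned the whole page without finding a nonzero
 * byte.  Ignoring the vectorized fast path, a byte-wise equivalent would be:
 *
 *   for (i = 0; i < TARGET_PAGE_SIZE; i++) {
 *       if (p[i] != 0) {
 *           return false;
 *       }
 *   }
 *   return true;
 */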
/* This struct contains the XBZRLE cache and the static buffers
   used by the compression */
static struct {
    /* buffer used for XBZRLE encoding */
    uint8_t *encoded_buf;
    /* buffer for storing page content */
    uint8_t *current_buf;
    /* buffer used for XBZRLE decoding */
    uint8_t *decoded_buf;
    /* Cache for XBZRLE */
    PageCache *cache;
} XBZRLE = {
    .encoded_buf = NULL,
    .current_buf = NULL,
    .decoded_buf = NULL,
    .cache = NULL,
};


int64_t xbzrle_cache_resize(int64_t new_size)
{
    if (XBZRLE.cache != NULL) {
        return cache_resize(XBZRLE.cache, new_size / TARGET_PAGE_SIZE) *
            TARGET_PAGE_SIZE;
    }
    return pow2floor(new_size);
}
/* accounting for migration statistics */
188 004d4c10 Orit Wasserman
typedef struct AccountingInfo {
189 004d4c10 Orit Wasserman
    uint64_t dup_pages;
190 f1c72795 Peter Lieven
    uint64_t skipped_pages;
191 004d4c10 Orit Wasserman
    uint64_t norm_pages;
192 004d4c10 Orit Wasserman
    uint64_t iterations;
193 f36d55af Orit Wasserman
    uint64_t xbzrle_bytes;
194 f36d55af Orit Wasserman
    uint64_t xbzrle_pages;
195 f36d55af Orit Wasserman
    uint64_t xbzrle_cache_miss;
196 f36d55af Orit Wasserman
    uint64_t xbzrle_overflows;
197 004d4c10 Orit Wasserman
} AccountingInfo;
198 004d4c10 Orit Wasserman
199 004d4c10 Orit Wasserman
static AccountingInfo acct_info;
200 004d4c10 Orit Wasserman
201 004d4c10 Orit Wasserman
static void acct_clear(void)
202 004d4c10 Orit Wasserman
{
203 004d4c10 Orit Wasserman
    memset(&acct_info, 0, sizeof(acct_info));
204 004d4c10 Orit Wasserman
}
205 004d4c10 Orit Wasserman
206 004d4c10 Orit Wasserman
uint64_t dup_mig_bytes_transferred(void)
207 004d4c10 Orit Wasserman
{
208 004d4c10 Orit Wasserman
    return acct_info.dup_pages * TARGET_PAGE_SIZE;
209 004d4c10 Orit Wasserman
}
210 004d4c10 Orit Wasserman
211 004d4c10 Orit Wasserman
uint64_t dup_mig_pages_transferred(void)
212 004d4c10 Orit Wasserman
{
213 004d4c10 Orit Wasserman
    return acct_info.dup_pages;
214 004d4c10 Orit Wasserman
}
215 004d4c10 Orit Wasserman
216 f1c72795 Peter Lieven
uint64_t skipped_mig_bytes_transferred(void)
217 f1c72795 Peter Lieven
{
218 f1c72795 Peter Lieven
    return acct_info.skipped_pages * TARGET_PAGE_SIZE;
219 f1c72795 Peter Lieven
}
220 f1c72795 Peter Lieven
221 f1c72795 Peter Lieven
uint64_t skipped_mig_pages_transferred(void)
222 f1c72795 Peter Lieven
{
223 f1c72795 Peter Lieven
    return acct_info.skipped_pages;
224 f1c72795 Peter Lieven
}
225 f1c72795 Peter Lieven
226 004d4c10 Orit Wasserman
uint64_t norm_mig_bytes_transferred(void)
227 004d4c10 Orit Wasserman
{
228 004d4c10 Orit Wasserman
    return acct_info.norm_pages * TARGET_PAGE_SIZE;
229 004d4c10 Orit Wasserman
}
230 004d4c10 Orit Wasserman
231 004d4c10 Orit Wasserman
uint64_t norm_mig_pages_transferred(void)
232 004d4c10 Orit Wasserman
{
233 004d4c10 Orit Wasserman
    return acct_info.norm_pages;
234 004d4c10 Orit Wasserman
}
235 004d4c10 Orit Wasserman
236 f36d55af Orit Wasserman
uint64_t xbzrle_mig_bytes_transferred(void)
237 f36d55af Orit Wasserman
{
238 f36d55af Orit Wasserman
    return acct_info.xbzrle_bytes;
239 f36d55af Orit Wasserman
}
240 f36d55af Orit Wasserman
241 f36d55af Orit Wasserman
uint64_t xbzrle_mig_pages_transferred(void)
242 f36d55af Orit Wasserman
{
243 f36d55af Orit Wasserman
    return acct_info.xbzrle_pages;
244 f36d55af Orit Wasserman
}
245 f36d55af Orit Wasserman
246 f36d55af Orit Wasserman
uint64_t xbzrle_mig_pages_cache_miss(void)
247 f36d55af Orit Wasserman
{
248 f36d55af Orit Wasserman
    return acct_info.xbzrle_cache_miss;
249 f36d55af Orit Wasserman
}
250 f36d55af Orit Wasserman
251 f36d55af Orit Wasserman
uint64_t xbzrle_mig_pages_overflow(void)
252 f36d55af Orit Wasserman
{
253 f36d55af Orit Wasserman
    return acct_info.xbzrle_overflows;
254 f36d55af Orit Wasserman
}
255 f36d55af Orit Wasserman
static size_t save_block_hdr(QEMUFile *f, RAMBlock *block, ram_addr_t offset,
                             int cont, int flag)
{
    size_t size;

    qemu_put_be64(f, offset | cont | flag);
    size = 8;

    if (!cont) {
        qemu_put_byte(f, strlen(block->idstr));
        qemu_put_buffer(f, (uint8_t *)block->idstr,
                        strlen(block->idstr));
        size += 1 + strlen(block->idstr);
    }
    return size;
}
#define ENCODING_FLAG_XBZRLE 0x1
274 17ad9b35 Orit Wasserman
275 17ad9b35 Orit Wasserman
static int save_xbzrle_page(QEMUFile *f, uint8_t *current_data,
276 17ad9b35 Orit Wasserman
                            ram_addr_t current_addr, RAMBlock *block,
277 dd051c72 Juan Quintela
                            ram_addr_t offset, int cont, bool last_stage)
278 17ad9b35 Orit Wasserman
{
279 17ad9b35 Orit Wasserman
    int encoded_len = 0, bytes_sent = -1;
280 17ad9b35 Orit Wasserman
    uint8_t *prev_cached_page;
281 17ad9b35 Orit Wasserman
282 17ad9b35 Orit Wasserman
    if (!cache_is_cached(XBZRLE.cache, current_addr)) {
283 dd051c72 Juan Quintela
        if (!last_stage) {
284 ee0b44aa Peter Lieven
            cache_insert(XBZRLE.cache, current_addr, current_data);
285 dd051c72 Juan Quintela
        }
286 f36d55af Orit Wasserman
        acct_info.xbzrle_cache_miss++;
287 17ad9b35 Orit Wasserman
        return -1;
288 17ad9b35 Orit Wasserman
    }
289 17ad9b35 Orit Wasserman
290 17ad9b35 Orit Wasserman
    prev_cached_page = get_cached_data(XBZRLE.cache, current_addr);
291 17ad9b35 Orit Wasserman
292 17ad9b35 Orit Wasserman
    /* save current buffer into memory */
293 17ad9b35 Orit Wasserman
    memcpy(XBZRLE.current_buf, current_data, TARGET_PAGE_SIZE);
294 17ad9b35 Orit Wasserman
295 17ad9b35 Orit Wasserman
    /* XBZRLE encoding (if there is no overflow) */
296 17ad9b35 Orit Wasserman
    encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
297 17ad9b35 Orit Wasserman
                                       TARGET_PAGE_SIZE, XBZRLE.encoded_buf,
298 17ad9b35 Orit Wasserman
                                       TARGET_PAGE_SIZE);
299 17ad9b35 Orit Wasserman
    if (encoded_len == 0) {
300 17ad9b35 Orit Wasserman
        DPRINTF("Skipping unmodified page\n");
301 17ad9b35 Orit Wasserman
        return 0;
302 17ad9b35 Orit Wasserman
    } else if (encoded_len == -1) {
303 17ad9b35 Orit Wasserman
        DPRINTF("Overflow\n");
304 f36d55af Orit Wasserman
        acct_info.xbzrle_overflows++;
305 17ad9b35 Orit Wasserman
        /* update data in the cache */
306 17ad9b35 Orit Wasserman
        memcpy(prev_cached_page, current_data, TARGET_PAGE_SIZE);
307 17ad9b35 Orit Wasserman
        return -1;
308 17ad9b35 Orit Wasserman
    }
309 17ad9b35 Orit Wasserman
310 17ad9b35 Orit Wasserman
    /* we need to update the data in the cache, in order to get the same data */
311 dd051c72 Juan Quintela
    if (!last_stage) {
312 dd051c72 Juan Quintela
        memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
313 dd051c72 Juan Quintela
    }
314 17ad9b35 Orit Wasserman
315 17ad9b35 Orit Wasserman
    /* Send XBZRLE based compressed page */
316 3f7d7b09 Juan Quintela
    bytes_sent = save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_XBZRLE);
317 17ad9b35 Orit Wasserman
    qemu_put_byte(f, ENCODING_FLAG_XBZRLE);
318 17ad9b35 Orit Wasserman
    qemu_put_be16(f, encoded_len);
319 17ad9b35 Orit Wasserman
    qemu_put_buffer(f, XBZRLE.encoded_buf, encoded_len);
320 3f7d7b09 Juan Quintela
    bytes_sent += encoded_len + 1 + 2;
321 f36d55af Orit Wasserman
    acct_info.xbzrle_pages++;
322 f36d55af Orit Wasserman
    acct_info.xbzrle_bytes += bytes_sent;
323 17ad9b35 Orit Wasserman
324 17ad9b35 Orit Wasserman
    return bytes_sent;
325 17ad9b35 Orit Wasserman
}
326 17ad9b35 Orit Wasserman
327 b23a9a5c Juan Quintela
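/*
 * Illustrative note (not in the original source): an XBZRLE page record as
 * written by save_xbzrle_page() is the block header (see save_block_hdr()
 * above) followed by
 *
 *   1 byte   ENCODING_FLAG_XBZRLE
 *   2 bytes  be16: encoded_len
 *   N bytes  the XBZRLE-encoded delta against the cached copy of the page
 *
 * which is why bytes_sent is incremented by encoded_len + 1 + 2.  The
 * encoding only pays off when the page is already in the cache; a cache miss
 * returns -1, and the caller falls back to sending a normal page.
 */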
/* This is the last block that we have visited searching for dirty pages */
static RAMBlock *last_seen_block;
/* This is the last block from where we have sent data */
static RAMBlock *last_sent_block;
static ram_addr_t last_offset;
static unsigned long *migration_bitmap;
static uint64_t migration_dirty_pages;
static uint32_t last_version;
static bool ram_bulk_stage;

static inline
ram_addr_t migration_bitmap_find_and_reset_dirty(MemoryRegion *mr,
                                                 ram_addr_t start)
{
    unsigned long base = mr->ram_addr >> TARGET_PAGE_BITS;
    unsigned long nr = base + (start >> TARGET_PAGE_BITS);
    uint64_t mr_size = TARGET_PAGE_ALIGN(memory_region_size(mr));
    unsigned long size = base + (mr_size >> TARGET_PAGE_BITS);

    unsigned long next;

    if (ram_bulk_stage && nr > base) {
        next = nr + 1;
    } else {
        next = find_next_bit(migration_bitmap, size, nr);
    }

    if (next < size) {
        clear_bit(next, migration_bitmap);
        migration_dirty_pages--;
    }
    return (next - base) << TARGET_PAGE_BITS;
}
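/*
 * Illustrative note (not in the original source): the migration bitmap holds
 * one bit per target page of guest RAM, indexed by the page's global ram
 * address.  For a page at byte offset `offset` inside a MemoryRegion `mr`:
 *
 *   bit = (mr->ram_addr + offset) >> TARGET_PAGE_BITS;
 *
 * and the search above converts a bit index back into a block-relative byte
 * offset with (next - base) << TARGET_PAGE_BITS.  During the bulk stage the
 * find_next_bit() scan is skipped, because setup marked every page dirty.
 */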
static inline bool migration_bitmap_set_dirty(MemoryRegion *mr,
                                              ram_addr_t offset)
{
    bool ret;
    int nr = (mr->ram_addr + offset) >> TARGET_PAGE_BITS;

    ret = test_and_set_bit(nr, migration_bitmap);

    if (!ret) {
        migration_dirty_pages++;
    }
    return ret;
}

/* Needs iothread lock! */

static void migration_bitmap_sync(void)
{
    RAMBlock *block;
    ram_addr_t addr;
    uint64_t num_dirty_pages_init = migration_dirty_pages;
    MigrationState *s = migrate_get_current();
    static int64_t start_time;
    static int64_t bytes_xfer_prev;
    static int64_t num_dirty_pages_period;
    int64_t end_time;
    int64_t bytes_xfer_now;

    if (!bytes_xfer_prev) {
        bytes_xfer_prev = ram_bytes_transferred();
    }

    if (!start_time) {
        start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    }

    trace_migration_bitmap_sync_start();
    address_space_sync_dirty_bitmap(&address_space_memory);

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        for (addr = 0; addr < block->length; addr += TARGET_PAGE_SIZE) {
            if (memory_region_test_and_clear_dirty(block->mr,
                                                   addr, TARGET_PAGE_SIZE,
                                                   DIRTY_MEMORY_MIGRATION)) {
                migration_bitmap_set_dirty(block->mr, addr);
            }
        }
    }
    trace_migration_bitmap_sync_end(migration_dirty_pages
                                    - num_dirty_pages_init);
    num_dirty_pages_period += migration_dirty_pages - num_dirty_pages_init;
    end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);

    /* more than 1 second = 1000 milliseconds */
    if (end_time > start_time + 1000) {
        if (migrate_auto_converge()) {
            /* The following detection logic can be refined later. For now:
               check to see if the dirtied bytes are more than 50% of the
               approx. amount of bytes that just got transferred since the
               last time we were in this routine. If that happens >N times
               (for now N==4) we turn on the throttle down logic */
            bytes_xfer_now = ram_bytes_transferred();
            if (s->dirty_pages_rate &&
                (num_dirty_pages_period * TARGET_PAGE_SIZE >
                    (bytes_xfer_now - bytes_xfer_prev) / 2) &&
                (dirty_rate_high_cnt++ > 4)) {
                trace_migration_throttle();
                mig_throttle_on = true;
                dirty_rate_high_cnt = 0;
            }
            bytes_xfer_prev = bytes_xfer_now;
        } else {
            mig_throttle_on = false;
        }
        s->dirty_pages_rate = num_dirty_pages_period * 1000
            / (end_time - start_time);
        s->dirty_bytes_rate = s->dirty_pages_rate * TARGET_PAGE_SIZE;
        start_time = end_time;
        num_dirty_pages_period = 0;
    }
}
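/*
 * Illustrative note (not in the original source): once per second, the
 * auto-converge check above compares what the guest dirtied against what the
 * migration actually moved in the same window, roughly:
 *
 *   dirtied     = num_dirty_pages_period * TARGET_PAGE_SIZE;
 *   transferred = bytes_xfer_now - bytes_xfer_prev;
 *   if (dirtied > transferred / 2 && ++trips > 4) {
 *       mig_throttle_on = true;
 *   }
 *
 * mig_throttle_on is then consumed by check_guest_throttling(), called from
 * the ram_save_iterate() loop, to slow the guest down so migration converges.
 */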
/*
 * ram_save_block: Writes a page of memory to the stream f
 *
 * Returns:  The number of bytes written.
 *           0 means no dirty pages
 */

static int ram_save_block(QEMUFile *f, bool last_stage)
{
    RAMBlock *block = last_seen_block;
    ram_addr_t offset = last_offset;
    bool complete_round = false;
    int bytes_sent = 0;
    MemoryRegion *mr;
    ram_addr_t current_addr;

    if (!block) {
        block = QTAILQ_FIRST(&ram_list.blocks);
    }

    while (true) {
        mr = block->mr;
        offset = migration_bitmap_find_and_reset_dirty(mr, offset);
        if (complete_round && block == last_seen_block &&
            offset >= last_offset) {
            break;
        }
        if (offset >= block->length) {
            offset = 0;
            block = QTAILQ_NEXT(block, next);
            if (!block) {
                block = QTAILQ_FIRST(&ram_list.blocks);
                complete_round = true;
                ram_bulk_stage = false;
            }
        } else {
            int ret;
            uint8_t *p;
            int cont = (block == last_sent_block) ?
                RAM_SAVE_FLAG_CONTINUE : 0;

            p = memory_region_get_ram_ptr(mr) + offset;

            /* When in doubt, send the page as a normal page */
            bytes_sent = -1;
            ret = ram_control_save_page(f, block->offset,
                               offset, TARGET_PAGE_SIZE, &bytes_sent);

            if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
                if (ret != RAM_SAVE_CONTROL_DELAYED) {
                    if (bytes_sent > 0) {
                        acct_info.norm_pages++;
                    } else if (bytes_sent == 0) {
                        acct_info.dup_pages++;
                    }
                }
            } else if (is_zero_page(p)) {
                acct_info.dup_pages++;
                bytes_sent = save_block_hdr(f, block, offset, cont,
                                            RAM_SAVE_FLAG_COMPRESS);
                qemu_put_byte(f, 0);
                bytes_sent++;
            } else if (!ram_bulk_stage && migrate_use_xbzrle()) {
                current_addr = block->offset + offset;
                bytes_sent = save_xbzrle_page(f, p, current_addr, block,
                                              offset, cont, last_stage);
                if (!last_stage) {
                    p = get_cached_data(XBZRLE.cache, current_addr);
                }
            }

            /* XBZRLE overflow or normal page */
            if (bytes_sent == -1) {
                bytes_sent = save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_PAGE);
                qemu_put_buffer_async(f, p, TARGET_PAGE_SIZE);
                bytes_sent += TARGET_PAGE_SIZE;
                acct_info.norm_pages++;
            }

            /* if page is unmodified, continue to the next */
            if (bytes_sent > 0) {
                last_sent_block = block;
                break;
            }
        }
    }
    last_seen_block = block;
    last_offset = offset;

    return bytes_sent;
}
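/*
 * Illustrative note (not in the original source): for each dirty page,
 * ram_save_block() tries the cheapest representation first:
 *
 *   1. ram_control_save_page()  -- a transport hook (e.g. RDMA) may take it
 *   2. is_zero_page()           -- one header plus a single 0x00 byte
 *   3. save_xbzrle_page()       -- delta against the cache (not in bulk stage)
 *   4. plain RAM_SAVE_FLAG_PAGE -- header plus the full TARGET_PAGE_SIZE bytes
 *
 * bytes_sent is 0 only when a full round found no dirty page anywhere, and
 * becomes > 0 as soon as one page has been put on the wire.
 */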
static uint64_t bytes_transferred;

void acct_update_position(QEMUFile *f, size_t size, bool zero)
{
    uint64_t pages = size / TARGET_PAGE_SIZE;
    if (zero) {
        acct_info.dup_pages += pages;
    } else {
        acct_info.norm_pages += pages;
        bytes_transferred += size;
        qemu_update_position(f, size);
    }
}

static ram_addr_t ram_save_remaining(void)
{
    return migration_dirty_pages;
}

uint64_t ram_bytes_remaining(void)
{
    return ram_save_remaining() * TARGET_PAGE_SIZE;
}

uint64_t ram_bytes_transferred(void)
{
    return bytes_transferred;
}

uint64_t ram_bytes_total(void)
{
    RAMBlock *block;
    uint64_t total = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        total += block->length;
    }

    return total;
}

static void migration_end(void)
{
    if (migration_bitmap) {
        memory_global_dirty_log_stop();
        g_free(migration_bitmap);
        migration_bitmap = NULL;
    }

    if (XBZRLE.cache) {
        cache_fini(XBZRLE.cache);
        g_free(XBZRLE.cache);
        g_free(XBZRLE.encoded_buf);
        g_free(XBZRLE.current_buf);
        g_free(XBZRLE.decoded_buf);
        XBZRLE.cache = NULL;
    }
}

static void ram_migration_cancel(void *opaque)
{
    migration_end();
}

static void reset_ram_globals(void)
{
    last_seen_block = NULL;
    last_sent_block = NULL;
    last_offset = 0;
    last_version = ram_list.version;
    ram_bulk_stage = true;
}

#define MAX_WAIT 50 /* ms, half buffered_file limit */
static int ram_save_setup(QEMUFile *f, void *opaque)
{
    RAMBlock *block;
    int64_t ram_pages = last_ram_offset() >> TARGET_PAGE_BITS;

    migration_bitmap = bitmap_new(ram_pages);
    bitmap_set(migration_bitmap, 0, ram_pages);
    migration_dirty_pages = ram_pages;
    mig_throttle_on = false;
    dirty_rate_high_cnt = 0;

    if (migrate_use_xbzrle()) {
        XBZRLE.cache = cache_init(migrate_xbzrle_cache_size() /
                                  TARGET_PAGE_SIZE,
                                  TARGET_PAGE_SIZE);
        if (!XBZRLE.cache) {
            DPRINTF("Error creating cache\n");
            return -1;
        }
        XBZRLE.encoded_buf = g_malloc0(TARGET_PAGE_SIZE);
        XBZRLE.current_buf = g_malloc(TARGET_PAGE_SIZE);
        acct_clear();
    }

    qemu_mutex_lock_iothread();
    qemu_mutex_lock_ramlist();
    bytes_transferred = 0;
    reset_ram_globals();

    memory_global_dirty_log_start();
    migration_bitmap_sync();
    qemu_mutex_unlock_iothread();

    qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        qemu_put_byte(f, strlen(block->idstr));
        qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
        qemu_put_be64(f, block->length);
    }

    qemu_mutex_unlock_ramlist();

    ram_control_before_iterate(f, RAM_CONTROL_SETUP);
    ram_control_after_iterate(f, RAM_CONTROL_SETUP);

    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);

    return 0;
}
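/*
 * Illustrative note (not in the original source): the setup stage therefore
 * emits the following stream before any page data:
 *
 *   8 bytes  be64: total RAM size in bytes, OR'ed with RAM_SAVE_FLAG_MEM_SIZE
 *   per RAMBlock:
 *     1 byte   strlen(idstr)
 *     N bytes  idstr
 *     8 bytes  be64: block length
 *   8 bytes  be64: RAM_SAVE_FLAG_EOS
 *
 * ram_load() parses exactly this layout in its RAM_SAVE_FLAG_MEM_SIZE branch
 * to check that source and destination RAM blocks match.
 */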
static int ram_save_iterate(QEMUFile *f, void *opaque)
{
    int ret;
    int i;
    int64_t t0;
    int total_sent = 0;

    qemu_mutex_lock_ramlist();

    if (ram_list.version != last_version) {
        reset_ram_globals();
    }

    ram_control_before_iterate(f, RAM_CONTROL_ROUND);

    t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    i = 0;
    while ((ret = qemu_file_rate_limit(f)) == 0) {
        int bytes_sent;

        bytes_sent = ram_save_block(f, false);
        /* no more blocks to send */
        if (bytes_sent == 0) {
            break;
        }
        total_sent += bytes_sent;
        acct_info.iterations++;
        check_guest_throttling();
        /* we want to check in the 1st loop, just in case it was the 1st time
           and we had to sync the dirty bitmap.
           qemu_clock_get_ns() is a bit expensive, so we only check every few
           iterations
        */
        if ((i & 63) == 0) {
            uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) / 1000000;
            if (t1 > MAX_WAIT) {
                DPRINTF("big wait: %" PRIu64 " milliseconds, %d iterations\n",
                        t1, i);
                break;
            }
        }
        i++;
    }

    qemu_mutex_unlock_ramlist();

    /*
     * Must occur before EOS (or any QEMUFile operation)
     * because of RDMA protocol.
     */
    ram_control_after_iterate(f, RAM_CONTROL_ROUND);

    if (ret < 0) {
        bytes_transferred += total_sent;
        return ret;
    }

    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
    total_sent += 8;
    bytes_transferred += total_sent;

    return total_sent;
}
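/*
 * Illustrative note (not in the original source): the (i & 63) == 0 test
 * above samples the clock only once every 64 pages, since reading it per
 * page would be too expensive; the loop then bails out after MAX_WAIT (50 ms)
 * so a single iteration cannot monopolize the migration thread.
 */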
static int ram_save_complete(QEMUFile *f, void *opaque)
726 16310a3c Juan Quintela
{
727 b2a8658e Umesh Deshpande
    qemu_mutex_lock_ramlist();
728 9c339485 Paolo Bonzini
    migration_bitmap_sync();
729 b2a8658e Umesh Deshpande
730 0033b8b4 Michael R. Hines
    ram_control_before_iterate(f, RAM_CONTROL_FINISH);
731 0033b8b4 Michael R. Hines
732 ad96090a Blue Swirl
    /* try transferring iterative blocks of memory */
733 3a697f69 Orit Wasserman
734 16310a3c Juan Quintela
    /* flush all remaining blocks regardless of rate limiting */
735 6c779f22 Orit Wasserman
    while (true) {
736 3fc250b4 Pierre Riteau
        int bytes_sent;
737 3fc250b4 Pierre Riteau
738 dd051c72 Juan Quintela
        bytes_sent = ram_save_block(f, true);
739 6c779f22 Orit Wasserman
        /* no more blocks to sent */
740 b823ceaa Juan Quintela
        if (bytes_sent == 0) {
741 6c779f22 Orit Wasserman
            break;
742 ad96090a Blue Swirl
        }
743 16310a3c Juan Quintela
        bytes_transferred += bytes_sent;
744 ad96090a Blue Swirl
    }
745 0033b8b4 Michael R. Hines
746 0033b8b4 Michael R. Hines
    ram_control_after_iterate(f, RAM_CONTROL_FINISH);
747 244eaa75 Paolo Bonzini
    migration_end();
748 ad96090a Blue Swirl
749 b2a8658e Umesh Deshpande
    qemu_mutex_unlock_ramlist();
750 ad96090a Blue Swirl
    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
751 ad96090a Blue Swirl
752 5b3c9638 Juan Quintela
    return 0;
753 ad96090a Blue Swirl
}
754 ad96090a Blue Swirl
755 e4ed1541 Juan Quintela
static uint64_t ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size)
756 e4ed1541 Juan Quintela
{
757 e4ed1541 Juan Quintela
    uint64_t remaining_size;
758 e4ed1541 Juan Quintela
759 e4ed1541 Juan Quintela
    remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;
760 e4ed1541 Juan Quintela
761 e4ed1541 Juan Quintela
    if (remaining_size < max_size) {
762 32c835ba Paolo Bonzini
        qemu_mutex_lock_iothread();
763 e4ed1541 Juan Quintela
        migration_bitmap_sync();
764 32c835ba Paolo Bonzini
        qemu_mutex_unlock_iothread();
765 e4ed1541 Juan Quintela
        remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;
766 e4ed1541 Juan Quintela
    }
767 e4ed1541 Juan Quintela
    return remaining_size;
768 e4ed1541 Juan Quintela
}
769 e4ed1541 Juan Quintela
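/*
 * Illustrative note (not in the original source): the migration core passes
 * in max_size as, roughly, the amount of data it could still send within the
 * allowed downtime.  Re-syncing the bitmap (under the iothread lock) when the
 * estimate drops below max_size means the decision to enter the final
 * stop-and-copy stage is made with fresh dirty-page numbers, not stale ones.
 */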
static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
{
    int ret, rc = 0;
    unsigned int xh_len;
    int xh_flags;

    if (!XBZRLE.decoded_buf) {
        XBZRLE.decoded_buf = g_malloc(TARGET_PAGE_SIZE);
    }

    /* extract RLE header */
    xh_flags = qemu_get_byte(f);
    xh_len = qemu_get_be16(f);

    if (xh_flags != ENCODING_FLAG_XBZRLE) {
        fprintf(stderr, "Failed to load XBZRLE page - wrong compression!\n");
        return -1;
    }

    if (xh_len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "Failed to load XBZRLE page - len overflow!\n");
        return -1;
    }
    /* load data and decode */
    qemu_get_buffer(f, XBZRLE.decoded_buf, xh_len);

    /* decode RLE */
    ret = xbzrle_decode_buffer(XBZRLE.decoded_buf, xh_len, host,
                               TARGET_PAGE_SIZE);
    if (ret == -1) {
        fprintf(stderr, "Failed to load XBZRLE page - decode error!\n");
        rc = -1;
    } else if (ret > TARGET_PAGE_SIZE) {
        fprintf(stderr, "Failed to load XBZRLE page - size %d exceeds %d!\n",
                ret, TARGET_PAGE_SIZE);
        abort();
    }

    return rc;
}
static inline void *host_from_stream_offset(QEMUFile *f,
                                            ram_addr_t offset,
                                            int flags)
{
    static RAMBlock *block = NULL;
    char id[256];
    uint8_t len;

    if (flags & RAM_SAVE_FLAG_CONTINUE) {
        if (!block) {
            fprintf(stderr, "Ack, bad migration stream!\n");
            return NULL;
        }

        return memory_region_get_ram_ptr(block->mr) + offset;
    }

    len = qemu_get_byte(f);
    qemu_get_buffer(f, (uint8_t *)id, len);
    id[len] = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (!strncmp(id, block->idstr, sizeof(id))) {
            return memory_region_get_ram_ptr(block->mr) + offset;
        }
    }

    fprintf(stderr, "Can't find block %s!\n", id);
    return NULL;
}
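/*
 * Illustrative note (not in the original source): the static `block` pointer
 * is what makes RAM_SAVE_FLAG_CONTINUE work on the load side -- a record
 * carrying the flag reuses whichever block the previous record named, so the
 * stream only pays for the idstr when the sender switches blocks.
 */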
/*
 * If a page (or a whole RDMA chunk) has been
 * determined to be zero, then zap it.
 */
void ram_handle_compressed(void *host, uint8_t ch, uint64_t size)
{
    if (ch != 0 || !is_zero_page(host)) {
        memset(host, ch, size);
#ifndef _WIN32
        if (ch == 0 &&
            (!kvm_enabled() || kvm_has_sync_mmu()) &&
            getpagesize() <= TARGET_PAGE_SIZE) {
            qemu_madvise(host, TARGET_PAGE_SIZE, QEMU_MADV_DONTNEED);
        }
#endif
    }
}
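/*
 * Illustrative note (not in the original source): when ch == 0 and the
 * destination page already reads as all zeroes, the function does nothing at
 * all.  When it does have to memset() a page to zero, the madvise(DONTNEED)
 * afterwards lets the host reclaim the backing page, so fully zero guest RAM
 * need not stay resident; the kvm_has_sync_mmu() and host-page-size checks
 * guard the configurations where dropping the mapping is unsafe.
 */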
static int ram_load(QEMUFile *f, void *opaque, int version_id)
{
    ram_addr_t addr;
    int flags, ret = 0;
    int error;
    static uint64_t seq_iter;

    seq_iter++;

    if (version_id != 4) {
        return -EINVAL;
    }

    do {
        addr = qemu_get_be64(f);

        flags = addr & ~TARGET_PAGE_MASK;
        addr &= TARGET_PAGE_MASK;

        if (flags & RAM_SAVE_FLAG_MEM_SIZE) {
            if (version_id == 4) {
                /* Synchronize RAM block list */
                char id[256];
                ram_addr_t length;
                ram_addr_t total_ram_bytes = addr;

                while (total_ram_bytes) {
                    RAMBlock *block;
                    uint8_t len;

                    len = qemu_get_byte(f);
                    qemu_get_buffer(f, (uint8_t *)id, len);
                    id[len] = 0;
                    length = qemu_get_be64(f);

                    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
                        if (!strncmp(id, block->idstr, sizeof(id))) {
                            if (block->length != length) {
                                fprintf(stderr,
                                        "Length mismatch: %s: " RAM_ADDR_FMT
                                        " in != " RAM_ADDR_FMT "\n", id, length,
                                        block->length);
                                ret = -EINVAL;
                                goto done;
                            }
                            break;
                        }
                    }

                    if (!block) {
                        fprintf(stderr, "Unknown ramblock \"%s\", cannot "
                                "accept migration\n", id);
                        ret = -EINVAL;
                        goto done;
                    }

                    total_ram_bytes -= length;
                }
            }
        }

        if (flags & RAM_SAVE_FLAG_COMPRESS) {
            void *host;
            uint8_t ch;

            host = host_from_stream_offset(f, addr, flags);
            if (!host) {
                return -EINVAL;
            }

            ch = qemu_get_byte(f);
            ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);
        } else if (flags & RAM_SAVE_FLAG_PAGE) {
            void *host;

            host = host_from_stream_offset(f, addr, flags);
            if (!host) {
                return -EINVAL;
            }

            qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
        } else if (flags & RAM_SAVE_FLAG_XBZRLE) {
            void *host = host_from_stream_offset(f, addr, flags);
            if (!host) {
                return -EINVAL;
            }

            if (load_xbzrle(f, addr, host) < 0) {
                ret = -EINVAL;
                goto done;
            }
        } else if (flags & RAM_SAVE_FLAG_HOOK) {
            ram_control_load_hook(f, flags);
        }
        error = qemu_file_get_error(f);
        if (error) {
            ret = error;
            goto done;
        }
    } while (!(flags & RAM_SAVE_FLAG_EOS));

done:
    DPRINTF("Completed load of VM with exit code %d seq iteration "
            "%" PRIu64 "\n", ret, seq_iter);
    return ret;
}
SaveVMHandlers savevm_ram_handlers = {
    .save_live_setup = ram_save_setup,
    .save_live_iterate = ram_save_iterate,
    .save_live_complete = ram_save_complete,
    .save_live_pending = ram_save_pending,
    .load_state = ram_load,
    .cancel = ram_migration_cancel,
};

struct soundhw {
    const char *name;
    const char *descr;
    int enabled;
    int isa;
    union {
        int (*init_isa) (ISABus *bus);
        int (*init_pci) (PCIBus *bus);
    } init;
};

static struct soundhw soundhw[9];
static int soundhw_count;
void isa_register_soundhw(const char *name, const char *descr,
990 36cd6f6f Paolo Bonzini
                          int (*init_isa)(ISABus *bus))
991 36cd6f6f Paolo Bonzini
{
992 36cd6f6f Paolo Bonzini
    assert(soundhw_count < ARRAY_SIZE(soundhw) - 1);
993 36cd6f6f Paolo Bonzini
    soundhw[soundhw_count].name = name;
994 36cd6f6f Paolo Bonzini
    soundhw[soundhw_count].descr = descr;
995 36cd6f6f Paolo Bonzini
    soundhw[soundhw_count].isa = 1;
996 36cd6f6f Paolo Bonzini
    soundhw[soundhw_count].init.init_isa = init_isa;
997 36cd6f6f Paolo Bonzini
    soundhw_count++;
998 36cd6f6f Paolo Bonzini
}
999 ad96090a Blue Swirl
1000 36cd6f6f Paolo Bonzini
void pci_register_soundhw(const char *name, const char *descr,
1001 36cd6f6f Paolo Bonzini
                          int (*init_pci)(PCIBus *bus))
1002 36cd6f6f Paolo Bonzini
{
1003 36cd6f6f Paolo Bonzini
    assert(soundhw_count < ARRAY_SIZE(soundhw) - 1);
1004 36cd6f6f Paolo Bonzini
    soundhw[soundhw_count].name = name;
1005 36cd6f6f Paolo Bonzini
    soundhw[soundhw_count].descr = descr;
1006 36cd6f6f Paolo Bonzini
    soundhw[soundhw_count].isa = 0;
1007 36cd6f6f Paolo Bonzini
    soundhw[soundhw_count].init.init_pci = init_pci;
1008 36cd6f6f Paolo Bonzini
    soundhw_count++;
1009 36cd6f6f Paolo Bonzini
}
1010 ad96090a Blue Swirl
1011 ad96090a Blue Swirl
void select_soundhw(const char *optarg)
1012 ad96090a Blue Swirl
{
1013 ad96090a Blue Swirl
    struct soundhw *c;
1014 ad96090a Blue Swirl
1015 c8057f95 Peter Maydell
    if (is_help_option(optarg)) {
1016 ad96090a Blue Swirl
    show_valid_cards:
1017 ad96090a Blue Swirl
1018 36cd6f6f Paolo Bonzini
        if (soundhw_count) {
1019 36cd6f6f Paolo Bonzini
             printf("Valid sound card names (comma separated):\n");
1020 36cd6f6f Paolo Bonzini
             for (c = soundhw; c->name; ++c) {
1021 36cd6f6f Paolo Bonzini
                 printf ("%-11s %s\n", c->name, c->descr);
1022 36cd6f6f Paolo Bonzini
             }
1023 36cd6f6f Paolo Bonzini
             printf("\n-soundhw all will enable all of the above\n");
1024 36cd6f6f Paolo Bonzini
        } else {
1025 36cd6f6f Paolo Bonzini
             printf("Machine has no user-selectable audio hardware "
1026 36cd6f6f Paolo Bonzini
                    "(it may or may not have always-present audio hardware).\n");
1027 ad96090a Blue Swirl
        }
1028 c8057f95 Peter Maydell
        exit(!is_help_option(optarg));
1029 ad96090a Blue Swirl
    }
1030 ad96090a Blue Swirl
    else {
1031 ad96090a Blue Swirl
        size_t l;
1032 ad96090a Blue Swirl
        const char *p;
1033 ad96090a Blue Swirl
        char *e;
1034 ad96090a Blue Swirl
        int bad_card = 0;
1035 ad96090a Blue Swirl
1036 ad96090a Blue Swirl
        if (!strcmp(optarg, "all")) {
1037 ad96090a Blue Swirl
            for (c = soundhw; c->name; ++c) {
1038 ad96090a Blue Swirl
                c->enabled = 1;
1039 ad96090a Blue Swirl
            }
1040 ad96090a Blue Swirl
            return;
1041 ad96090a Blue Swirl
        }
1042 ad96090a Blue Swirl
1043 ad96090a Blue Swirl
        p = optarg;
1044 ad96090a Blue Swirl
        while (*p) {
1045 ad96090a Blue Swirl
            e = strchr(p, ',');
1046 ad96090a Blue Swirl
            l = !e ? strlen(p) : (size_t) (e - p);
1047 ad96090a Blue Swirl
1048 ad96090a Blue Swirl
            for (c = soundhw; c->name; ++c) {
1049 ad96090a Blue Swirl
                if (!strncmp(c->name, p, l) && !c->name[l]) {
1050 ad96090a Blue Swirl
                    c->enabled = 1;
1051 ad96090a Blue Swirl
                    break;
1052 ad96090a Blue Swirl
                }
1053 ad96090a Blue Swirl
            }
1054 ad96090a Blue Swirl
1055 ad96090a Blue Swirl
            if (!c->name) {
1056 ad96090a Blue Swirl
                if (l > 80) {
1057 ad96090a Blue Swirl
                    fprintf(stderr,
1058 ad96090a Blue Swirl
                            "Unknown sound card name (too big to show)\n");
1059 ad96090a Blue Swirl
                }
1060 ad96090a Blue Swirl
                else {
1061 ad96090a Blue Swirl
                    fprintf(stderr, "Unknown sound card name `%.*s'\n",
1062 ad96090a Blue Swirl
                            (int) l, p);
1063 ad96090a Blue Swirl
                }
1064 ad96090a Blue Swirl
                bad_card = 1;
1065 ad96090a Blue Swirl
            }
1066 ad96090a Blue Swirl
            p += l + (e != NULL);
1067 ad96090a Blue Swirl
        }
1068 ad96090a Blue Swirl
1069 ad96090a Blue Swirl
        if (bad_card) {
1070 ad96090a Blue Swirl
            goto show_valid_cards;
1071 ad96090a Blue Swirl
        }
1072 ad96090a Blue Swirl
    }
1073 ad96090a Blue Swirl
}
1074 0dfa5ef9 Isaku Yamahata
1075 f81222bc Paolo Bonzini
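/* select_soundhw() above is fed the argument of the -soundhw command line
 * option.  Illustrative invocations (the card names available depend on
 * what the target actually registers):
 *
 *     qemu-system-i386 -soundhw help        # list the valid card names
 *     qemu-system-i386 -soundhw sb16,adlib  # enable a comma-separated set
 *     qemu-system-i386 -soundhw all         # enable every registered card
 */
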
void audio_init(void)
{
    struct soundhw *c;
    ISABus *isa_bus = (ISABus *) object_resolve_path_type("", TYPE_ISA_BUS, NULL);
    PCIBus *pci_bus = (PCIBus *) object_resolve_path_type("", TYPE_PCI_BUS, NULL);

    for (c = soundhw; c->name; ++c) {
        if (c->enabled) {
            if (c->isa) {
                if (!isa_bus) {
                    fprintf(stderr, "ISA bus not available for %s\n", c->name);
                    exit(1);
                }
                c->init.init_isa(isa_bus);
            } else {
                if (!pci_bus) {
                    fprintf(stderr, "PCI bus not available for %s\n", c->name);
                    exit(1);
                }
                c->init.init_pci(pci_bus);
            }
        }
    }
}

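/* Parse a canonical 36-character UUID string, e.g.
 * "550e8400-e29b-41d4-a716-446655440000", into 16 bytes.  Anything that is
 * not exactly 36 characters matching UUID_FMT is rejected with -1. */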
int qemu_uuid_parse(const char *str, uint8_t *uuid)
{
    int ret;

    if (strlen(str) != 36) {
        return -1;
    }

    ret = sscanf(str, UUID_FMT, &uuid[0], &uuid[1], &uuid[2], &uuid[3],
                 &uuid[4], &uuid[5], &uuid[6], &uuid[7], &uuid[8], &uuid[9],
                 &uuid[10], &uuid[11], &uuid[12], &uuid[13], &uuid[14],
                 &uuid[15]);

    if (ret != 16) {
        return -1;
    }
#ifdef TARGET_I386
    smbios_add_field(1, offsetof(struct smbios_type_1, uuid), uuid, 16);
#endif
    return 0;
}

void do_acpitable_option(const QemuOpts *opts)
{
#ifdef TARGET_I386
    Error *err = NULL;

    acpi_table_add(opts, &err);
    if (err) {
        error_report("Wrong acpi table provided: %s",
                     error_get_pretty(err));
        error_free(err);
        exit(1);
    }
#endif
}

void do_smbios_option(const char *optarg)
{
#ifdef TARGET_I386
    if (smbios_entry_add(optarg) < 0) {
        exit(1);
    }
#endif
}

void cpudef_init(void)
{
#if defined(cpudef_setup)
    cpudef_setup(); /* parse cpu definitions in target config file */
#endif
}

int tcg_available(void)
{
    return 1;
}

int kvm_available(void)
{
#ifdef CONFIG_KVM
    return 1;
#else
    return 0;
#endif
}

int xen_available(void)
{
#ifdef CONFIG_XEN
    return 1;
#else
    return 0;
#endif
}

TargetInfo *qmp_query_target(Error **errp)
{
    TargetInfo *info = g_malloc0(sizeof(*info));

    info->arch = g_strdup(TARGET_NAME);

    return info;
}

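/* Over QMP this surfaces as query-target, e.g. (illustrative exchange for
 * an x86_64 binary):
 *
 *     -> { "execute": "query-target" }
 *     <- { "return": { "arch": "x86_64" } }
 */
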
/* Stub function that gets run on the vCPU when it is brought out of the
 * VM to run inside QEMU, via async_run_on_cpu(). */
static void mig_sleep_cpu(void *opq)
{
    qemu_mutex_unlock_iothread();
    g_usleep(30*1000);
    qemu_mutex_lock_iothread();
}

/* To reduce the dirty rate, explicitly disallow the VCPUs from spending
 * much time in the VM.  The migration thread will try to catch up.
 * The workload will experience a performance drop.
 */
static void mig_throttle_guest_down(void)
{
    CPUState *cpu;

    qemu_mutex_lock_iothread();
    CPU_FOREACH(cpu) {
        async_run_on_cpu(cpu, mig_sleep_cpu, NULL);
    }
    qemu_mutex_unlock_iothread();
}

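/* check_guest_throttling() below re-arms the throttle at most once every
 * 40 ms, and each mig_throttle_guest_down() call puts every vCPU to sleep
 * for 30 ms, so while throttling stays active the vCPUs spend roughly
 * 30/40 = 75% of wall-clock time asleep. */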
static void check_guest_throttling(void)
{
    static int64_t t0;
    int64_t        t1;

    if (!mig_throttle_on) {
        return;
    }

    if (!t0) {
        t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
        return;
    }

    t1 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

    /* If it has been more than 40 ms since the last time the guest
     * was throttled then do it again.
     */
    if (40 < (t1 - t0) / 1000000) {
        mig_throttle_guest_down();
        t0 = t1;
    }
}