
/*
 *  Virtual page mapping
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "hw/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"
//#define DEBUG_UNASSIGNED
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;
DMAContext dma_context_memory;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *, cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

static void io_mem_init(void);
static void memory_map_init(void);
static void *qemu_safe_ram_ptr(ram_addr_t addr);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

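/* The physical page number -> MemoryRegionSection mapping is kept in a
 * multi-level table, effectively a radix tree with L2_BITS bits consumed
 * per level.  All nodes live in the phys_map_nodes array and refer to each
 * other by 16-bit index rather than by pointer; PHYS_MAP_NODE_NIL marks an
 * absent child, and leaf entries hold an index into phys_sections instead.
 */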
static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}

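/* Recursively populate the map for a run of pages: descend the tree,
 * allocating interior nodes on demand, and install 'leaf' wherever an
 * aligned run of 'step' pages fits entirely at the current level.  A large
 * aligned region therefore occupies a single high-level entry rather than
 * one entry per page; only the unaligned edges recurse further down.
 */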
static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

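/* Look up the MemoryRegionSection for physical page 'index': walk the tree
 * from the root, consuming L2_BITS of the page number per level until a
 * leaf is reached.  Unmapped pages resolve to the unassigned section, so
 * the result is always a valid section pointer.
 */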
MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr index)
{
    PhysPageEntry lp = d->phys_map;
    PhysPageEntry *p;
    int i;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.ptr;
not_found:
    return &phys_sections[s_index];
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu->env_ptr, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#else
#define vmstate_cpu_common vmstate_dummy
#endif

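/* Return the CPU with the given index, or NULL if there is none.  CPUs are
 * kept on a singly linked list threaded through their CPUArchState.
 */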
CPUState *qemu_get_cpu(int index)
{
    CPUArchState *env = first_cpu;
    CPUState *cpu = NULL;

    while (env) {
        cpu = ENV_GET_CPU(env);
        if (cpu->cpu_index == index) {
            break;
        }
        env = env->next_cpu;
    }

    return env ? cpu : NULL;
}

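/* Register a newly created CPU: give it the next free cpu_index, link it
 * onto the global CPU list, and register its state for savevm/migration.
 */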
void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
            (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUArchState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

void cpu_exit(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->exit_request = 1;
    cpu->tcg_exit_req = 1;
}

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

CPUArchState *cpu_copy(CPUArchState *env)
{
    CPUArchState *new_env = cpu_init(env->cpu_model_str);
    CPUArchState *next_cpu = new_env->next_cpu;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUArchState));

    /* Preserve chaining. */
    new_env->next_cpu = next_cpu;

    /* Clone all break/watchpoints.
       The memcpy above copied the source CPU's list heads, which must not
       be shared, so reinitialize the new CPU's lists and then clone the
       entries from the source CPU.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&new_env->breakpoints);
    QTAILQ_INIT(&new_env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    uintptr_t start1;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}

static int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

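/* Compute the iotlb value for a mapping.  For normal RAM this is the
 * ram_addr_t corresponding to paddr, with the notdirty or ROM section
 * index OR-ed into the (page-aligned, hence free) low bits; for MMIO it
 * is the section's index in phys_sections plus the offset of paddr within
 * the section.  Pages covered by a watchpoint are redirected to the watch
 * section and marked TLB_MMIO so that every access takes the slow path.
 */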
hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, paddr);
        if (!section->readonly) {
            iotlb |= phys_section_notdirty;
        } else {
            iotlb |= phys_section_rom;
        }
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = section - phys_sections;
        iotlb += memory_region_section_addr(section, paddr);
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = phys_section_watch + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(hwaddr base);
static void destroy_page_desc(uint16_t section_index)
{
    MemoryRegionSection *section = &phys_sections[section_index];
    MemoryRegion *mr = section->mr;

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

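/* Recursively tear down one subtree of the physical page map: destroy any
 * subpage MemoryRegions referenced from its leaves and reset the entry to
 * an empty, non-leaf state.
 */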
static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
{
    unsigned i;
    PhysPageEntry *p;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = phys_map_nodes[lp->ptr];
    for (i = 0; i < L2_SIZE; ++i) {
        if (!p[i].is_leaf) {
            destroy_l2_mapping(&p[i], level - 1);
        } else {
            destroy_page_desc(p[i].ptr);
        }
    }
    lp->is_leaf = 0;
    lp->ptr = PHYS_MAP_NODE_NIL;
}

static void destroy_all_mappings(AddressSpaceDispatch *d)
{
    destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
    phys_map_nodes_reset();
}

static uint16_t phys_section_add(MemoryRegionSection *section)
{
    if (phys_sections_nb == phys_sections_nb_alloc) {
        phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
        phys_sections = g_renew(MemoryRegionSection, phys_sections,
                                phys_sections_nb_alloc);
    }
    phys_sections[phys_sections_nb] = *section;
    return phys_sections_nb++;
}

static void phys_sections_clear(void)
{
    phys_sections_nb = 0;
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = TARGET_PAGE_SIZE,
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + section->size - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}

static void register_multipage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    hwaddr addr;
    uint16_t section_index = phys_section_add(section);

    assert(size);

    addr = start_addr;
    phys_page_set(d, addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
                  section_index);
}

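/* MemoryListener callback: enter a section into the dispatch tree.  The
 * section is split into an unaligned head, a run of whole target pages,
 * and an unaligned tail; the head, the tail, and any page whose offset
 * within its region is not page-aligned go through the subpage machinery,
 * while the aligned middle is registered as ordinary full-page mappings.
 */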
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
    MemoryRegionSection now = *section, remain = *section;

    if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
        || (now.size < TARGET_PAGE_SIZE)) {
        now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space,
                       now.size);
        register_subpage(d, &now);
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    while (remain.size >= TARGET_PAGE_SIZE) {
        now = remain;
        if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
            now.size = TARGET_PAGE_SIZE;
            register_subpage(d, &now);
        } else {
            now.size &= TARGET_PAGE_MASK;
            register_multipage(d, &now);
        }
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    now = remain;
    if (now.size) {
        register_subpage(d, &now);
    }
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

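/* Allocate the backing storage for a RAMBlock from a file in the -mem-path
 * directory (normally a hugetlbfs mount).  The file is created with
 * mkstemp and unlinked immediately so it vanishes with the process, grown
 * to the size rounded up to a whole number of huge pages, and mmapped.
 * Returns NULL on failure so the caller can fall back to anonymous memory.
 */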
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(block->mr->name);
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/')
            *c = '_';
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        g_free(filename);
        return NULL;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }
    block->fd = fd;
    return area;
}
#endif

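/* Best-fit allocator for ram_addr_t space: examine the gap after each
 * existing RAMBlock and return the start of the smallest gap large enough
 * to hold 'size' bytes.  Aborts if no gap fits.
 */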
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    if (QTAILQ_EMPTY(&ram_list.blocks))
        return 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;
    QemuOpts *machine_opts;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (machine_opts &&
        !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                            "but dump_guest_core=off specified\n");
        }
    }
}

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    qemu_mutex_unlock_ramlist();
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    QemuOpts *opts;

    opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

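/* Allocate a new RAMBlock of 'size' bytes and return its ram_addr_t offset.
 * Backing storage is taken, in order of preference, from the caller's
 * 'host' pointer, a -mem-path (hugetlbfs) file, a Xen/KVM specific
 * allocator, or plain qemu_vmalloc.  The block is inserted into ram_list
 * sorted by decreasing length, and its pages start out fully dirty.
 *
 * Callers normally reach this through qemu_ram_alloc(); e.g. (illustrative
 * only) memory_region_init_ram(mr, "pc.ram", ram_size) ends up allocating
 * its backing RAM this way.
 */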
1033 c5705a77 Avi Kivity
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1034 c5705a77 Avi Kivity
                                   MemoryRegion *mr)
1035 c5705a77 Avi Kivity
{
1036 abb26d63 Paolo Bonzini
    RAMBlock *block, *new_block;
1037 c5705a77 Avi Kivity
1038 c5705a77 Avi Kivity
    size = TARGET_PAGE_ALIGN(size);
1039 c5705a77 Avi Kivity
    new_block = g_malloc0(sizeof(*new_block));
1040 84b89d78 Cam Macdonell
1041 b2a8658e Umesh Deshpande
    /* This assumes the iothread lock is taken here too.  */
1042 b2a8658e Umesh Deshpande
    qemu_mutex_lock_ramlist();
1043 7c637366 Avi Kivity
    new_block->mr = mr;
1044 432d268c Jun Nakajima
    new_block->offset = find_ram_offset(size);
1045 6977dfe6 Yoshiaki Tamura
    if (host) {
1046 6977dfe6 Yoshiaki Tamura
        new_block->host = host;
1047 cd19cfa2 Huang Ying
        new_block->flags |= RAM_PREALLOC_MASK;
1048 6977dfe6 Yoshiaki Tamura
    } else {
1049 6977dfe6 Yoshiaki Tamura
        if (mem_path) {
1050 c902760f Marcelo Tosatti
#if defined (__linux__) && !defined(TARGET_S390X)
1051 6977dfe6 Yoshiaki Tamura
            new_block->host = file_ram_alloc(new_block, size, mem_path);
1052 6977dfe6 Yoshiaki Tamura
            if (!new_block->host) {
1053 6977dfe6 Yoshiaki Tamura
                new_block->host = qemu_vmalloc(size);
1054 8490fc78 Luiz Capitulino
                memory_try_enable_merging(new_block->host, size);
1055 6977dfe6 Yoshiaki Tamura
            }
1056 c902760f Marcelo Tosatti
#else
1057 6977dfe6 Yoshiaki Tamura
            fprintf(stderr, "-mem-path option unsupported\n");
1058 6977dfe6 Yoshiaki Tamura
            exit(1);
1059 c902760f Marcelo Tosatti
#endif
1060 6977dfe6 Yoshiaki Tamura
        } else {
1061 868bb33f Jan Kiszka
            if (xen_enabled()) {
1062 fce537d4 Avi Kivity
                xen_ram_alloc(new_block->offset, size, mr);
1063 fdec9918 Christian Borntraeger
            } else if (kvm_enabled()) {
1064 fdec9918 Christian Borntraeger
                /* some s390/kvm configurations have special constraints */
1065 fdec9918 Christian Borntraeger
                new_block->host = kvm_vmalloc(size);
1066 432d268c Jun Nakajima
            } else {
1067 432d268c Jun Nakajima
                new_block->host = qemu_vmalloc(size);
1068 432d268c Jun Nakajima
            }
1069 8490fc78 Luiz Capitulino
            memory_try_enable_merging(new_block->host, size);
1070 6977dfe6 Yoshiaki Tamura
        }
1071 c902760f Marcelo Tosatti
    }
1072 94a6b54f pbrook
    new_block->length = size;
1073 94a6b54f pbrook
1074 abb26d63 Paolo Bonzini
    /* Keep the list sorted from biggest to smallest block.  */
1075 abb26d63 Paolo Bonzini
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1076 abb26d63 Paolo Bonzini
        if (block->length < new_block->length) {
1077 abb26d63 Paolo Bonzini
            break;
1078 abb26d63 Paolo Bonzini
        }
1079 abb26d63 Paolo Bonzini
    }
1080 abb26d63 Paolo Bonzini
    if (block) {
1081 abb26d63 Paolo Bonzini
        QTAILQ_INSERT_BEFORE(block, new_block, next);
1082 abb26d63 Paolo Bonzini
    } else {
1083 abb26d63 Paolo Bonzini
        QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
1084 abb26d63 Paolo Bonzini
    }
1085 0d6d3c87 Paolo Bonzini
    ram_list.mru_block = NULL;
1086 94a6b54f pbrook
1087 f798b07f Umesh Deshpande
    ram_list.version++;
1088 b2a8658e Umesh Deshpande
    qemu_mutex_unlock_ramlist();
1089 f798b07f Umesh Deshpande
1090 7267c094 Anthony Liguori
    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
1091 04b16653 Alex Williamson
                                       last_ram_offset() >> TARGET_PAGE_BITS);
1092 5fda043f Igor Mitsyanko
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
1093 5fda043f Igor Mitsyanko
           0, size >> TARGET_PAGE_BITS);
1094 1720aeee Juan Quintela
    cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);
1095 94a6b54f pbrook
1096 ddb97f1d Jason Baron
    qemu_ram_setup_dump(new_block->host, size);
1097 ad0b5321 Luiz Capitulino
    qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);
1098 ddb97f1d Jason Baron
1099 6f0437e8 Jan Kiszka
    if (kvm_enabled())
1100 6f0437e8 Jan Kiszka
        kvm_setup_guest_memory(new_block->host, size);
1101 6f0437e8 Jan Kiszka
1102 94a6b54f pbrook
    return new_block->offset;
1103 94a6b54f pbrook
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}
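
/* Illustrative usage sketch (not from this file; names are hypothetical):
 * device models normally reach qemu_ram_alloc() through the memory API
 * rather than calling it directly, e.g.
 *
 *     memory_region_init_ram(&s->vram, "vga.vram", vram_size);
 *     vmstate_register_ram(&s->vram, DEVICE(dev));
 *     memory_region_add_subregion(get_system_memory(), base, &s->vram);
 *
 * memory_region_init_ram() ends up in qemu_ram_alloc() above, which picks
 * an offset with find_ram_offset() and links the block into ram_list.
 */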

void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_vfree(block->host);
                }
#else
                abort();
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_vfree(block->host);
                }
#endif
            }
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */

/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
        goto found;
    }
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    ram_list.mru_block = block;
    if (xen_enabled()) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        } else if (block->host == NULL) {
            block->host =
                xen_map_cache(block->offset, block->length, 1);
        }
    }
    return block->host + (addr - block->offset);
}
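
/* Illustrative usage sketch (not from this file; fields are hypothetical):
 * per the comment above, a device that owns a RAM block may cache a host
 * pointer into it, e.g. to clear its video RAM without going through the
 * DMA helpers, since it knows it stays within the owned block:
 *
 *     ram_addr_t base = memory_region_get_ram_addr(&s->vram);
 *     uint8_t *p = qemu_get_ram_ptr(base);
 *     memset(p, 0, s->vram_size);
 *
 * For guest-controlled lengths, use cpu_physical_memory_map() instead.
 */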

/* Return a host pointer to ram allocated with qemu_ram_alloc.  Same as
 * qemu_get_ram_ptr but do not touch ram_list.mru_block.
 *
 * ??? Is this still necessary?
 */
static void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QTAILQ_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}

void qemu_put_ram_ptr(void *addr)
{
    trace_qemu_put_ram_ptr(addr);
}

int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return 0;
    }

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        /* This case happens when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }

    return -1;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}
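
/* Illustrative sketch (not from this file): the softmmu slow path holds a
 * host pointer derived from a TLB entry and needs the ram_addr_t back, for
 * example to update the dirty bitmap:
 *
 *     ram_addr_t ra;
 *     if (qemu_ram_addr_from_host(host_ptr, &ra) == 0) {
 *         cpu_physical_memory_set_dirty_flags(ra, 0xff & ~CODE_DIRTY_FLAG);
 *     }
 *
 * The _nofail variant above aborts where this one would return -1.
 */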

static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
#endif
    return 0;
}

static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
#endif
}

static const MemoryRegionOps unassigned_mem_ops = {
    .read = unassigned_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t error_mem_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    abort();
}

static void error_mem_write(void *opaque, hwaddr addr,
                            uint64_t value, unsigned size)
{
    abort();
}

static const MemoryRegionOps error_mem_ops = {
    .read = error_mem_read,
    .write = error_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static const MemoryRegionOps rom_mem_ops = {
    .read = error_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, size);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static const MemoryRegionOps notdirty_mem_ops = {
    .read = error_mem_read,
    .write = notdirty_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
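
/* Note on the bookkeeping above (a sketch; the flag layout is assumed from
 * cpu-all.h of this era): every guest page has one dirty byte in
 * ram_list.phys_dirty.  A write through io_mem_notdirty sets all flags
 * except CODE_DIRTY_FLAG (dirty_flags |= 0xff & ~CODE_DIRTY_FLAG); only
 * after the translated code for the page has been flushed does the byte
 * reach 0xff, at which point the TLB entry can revert to a plain RAM
 * mapping and skip this slow path.
 */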

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUArchState *env = cpu_single_env;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb_check_watchpoint(env);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(env);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(env, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint64_t watch_mem_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
    switch (size) {
    case 1: return ldub_phys(addr);
    case 2: return lduw_phys(addr);
    case 4: return ldl_phys(addr);
    default: abort();
    }
}

static void watch_mem_write(void *opaque, hwaddr addr,
                            uint64_t val, unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
    switch (size) {
    case 1:
        stb_phys(addr, val);
        break;
    case 2:
        stw_phys(addr, val);
        break;
    case 4:
        stl_phys(addr, val);
        break;
    default: abort();
    }
}

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
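
/* Illustrative sketch (not from this file): watchpoints that route accesses
 * through watch_mem_ops are installed with cpu_watchpoint_insert(), e.g. a
 * gdbstub-style 4-byte write watch:
 *
 *     CPUWatchpoint *wp;
 *     cpu_watchpoint_insert(env, addr, 4, BP_MEM_WRITE | BP_GDB, &wp);
 *
 * The TLB then maps the containing page through io_mem_watch so that every
 * access is vetted by check_watchpoint() first.
 */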

static uint64_t subpage_read(void *opaque, hwaddr addr,
                             unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr += mmio->base;
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    return io_mem_read(section->mr, addr, len);
}

static void subpage_write(void *opaque, hwaddr addr,
                          uint64_t value, unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx
           " idx %d value %"PRIx64"\n",
           __func__, mmio, len, addr, idx, value);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr += mmio->base;
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    io_mem_write(section->mr, addr, value, len);
}

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t subpage_ram_read(void *opaque, hwaddr addr,
                                 unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return ldub_p(ptr);
    case 2: return lduw_p(ptr);
    case 4: return ldl_p(ptr);
    default: abort();
    }
}

static void subpage_ram_write(void *opaque, hwaddr addr,
                              uint64_t value, unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return stb_p(ptr, value);
    case 2: return stw_p(ptr, value);
    case 4: return stl_p(ptr, value);
    default: abort();
    }
}

static const MemoryRegionOps subpage_ram_ops = {
    .read = subpage_ram_read,
    .write = subpage_ram_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    if (memory_region_is_ram(phys_sections[section].mr)) {
        MemoryRegionSection new_section = phys_sections[section];
        new_section.mr = &io_mem_subpage_ram;
        section = phys_section_add(&new_section);
    }
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}

static subpage_t *subpage_init(hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->base = base;
    memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
                          "subpage", TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);

    return mmio;
}
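
/* Illustrative sketch (not from this file): subpages cover the case where
 * two sections share a single target page, e.g. a 1 KiB device window in an
 * otherwise RAM-backed 4 KiB page.  Conceptually the dispatch code does:
 *
 *     subpage_t *sp = subpage_init(page_base);
 *     subpage_register(sp, 0x000, 0x3ff, mmio_section);
 *     subpage_register(sp, 0x400, 0xfff, ram_section);
 *
 * after which subpage_read()/subpage_write() forward each access according
 * to its offset within the page.
 */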

static uint16_t dummy_section(MemoryRegion *mr)
{
    MemoryRegionSection section = {
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = UINT64_MAX,
    };

    return phys_section_add(&section);
}

MemoryRegion *iotlb_to_region(hwaddr index)
{
    return phys_sections[index & ~TARGET_PAGE_MASK].mr;
}

static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
    memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
                          "unassigned", UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
                          "notdirty", UINT64_MAX);
    memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
                          "subpage-ram", UINT64_MAX);
    memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
                          "watch", UINT64_MAX);
}

static void mem_begin(MemoryListener *listener)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);

    destroy_all_mappings(d);
    d->phys_map.ptr = PHYS_MAP_NODE_NIL;
}

static void core_begin(MemoryListener *listener)
{
    phys_sections_clear();
    phys_section_unassigned = dummy_section(&io_mem_unassigned);
    phys_section_notdirty = dummy_section(&io_mem_notdirty);
    phys_section_rom = dummy_section(&io_mem_rom);
    phys_section_watch = dummy_section(&io_mem_watch);
}

static void tcg_commit(MemoryListener *listener)
{
    CPUArchState *env;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}

static void core_log_global_start(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(1);
}

static void core_log_global_stop(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(0);
}

static void io_region_add(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);

    mrio->mr = section->mr;
    mrio->offset = section->offset_within_region;
    iorange_init(&mrio->iorange, &memory_region_iorange_ops,
                 section->offset_within_address_space, section->size);
    ioport_register(&mrio->iorange);
}

static void io_region_del(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    isa_unassign_ioport(section->offset_within_address_space, section->size);
}

static MemoryListener core_memory_listener = {
    .begin = core_begin,
    .log_global_start = core_log_global_start,
    .log_global_stop = core_log_global_stop,
    .priority = 1,
};

static MemoryListener io_memory_listener = {
    .region_add = io_region_add,
    .region_del = io_region_del,
    .priority = 0,
};

static MemoryListener tcg_memory_listener = {
    .commit = tcg_commit,
};

void address_space_init_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);

    d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
    d->listener = (MemoryListener) {
        .begin = mem_begin,
        .region_add = mem_add,
        .region_nop = mem_add,
        .priority = 0,
    };
    as->dispatch = d;
    memory_listener_register(&d->listener, as);
}

void address_space_destroy_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = as->dispatch;

    memory_listener_unregister(&d->listener);
    destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
    g_free(d);
    as->dispatch = NULL;
}

static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));
    memory_region_init(system_memory, "system", INT64_MAX);
    address_space_init(&address_space_memory, system_memory);
    address_space_memory.name = "memory";

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init(system_io, "io", 65536);
    address_space_init(&address_space_io, system_io);
    address_space_io.name = "I/O";

    memory_listener_register(&core_memory_listener, &address_space_memory);
    memory_listener_register(&io_memory_listener, &address_space_io);
    memory_listener_register(&tcg_memory_listener, &address_space_memory);

    dma_context_init(&dma_context_memory, &address_space_memory,
                     NULL, NULL, NULL);
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}
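
/* Illustrative sketch (not from this file; addresses and regions are
 * hypothetical): boards and devices attach their regions to the containers
 * created in memory_map_init(), e.g.
 *
 *     memory_region_add_subregion(get_system_memory(), 0x10000000, &s->mmio);
 *     memory_region_add_subregion(get_system_io(), 0x3f8, &s->ioports);
 */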

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else

static void invalidate_and_set_dirty(hwaddr addr,
                                     hwaddr length)
{
    if (!cpu_physical_memory_is_dirty(addr)) {
        /* invalidate code */
        tb_invalidate_phys_page_range(addr, addr + length, 0);
        /* set dirty bit */
        cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
    }
    xen_modified_memory(addr, length);
}
1882 a8170e5e Avi Kivity
void address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
1883 ac1970fb Avi Kivity
                      int len, bool is_write)
1884 13eb76e0 bellard
{
1885 ac1970fb Avi Kivity
    AddressSpaceDispatch *d = as->dispatch;
1886 37ec01d4 Avi Kivity
    int l;
1887 13eb76e0 bellard
    uint8_t *ptr;
1888 13eb76e0 bellard
    uint32_t val;
1889 a8170e5e Avi Kivity
    hwaddr page;
1890 f3705d53 Avi Kivity
    MemoryRegionSection *section;
1891 3b46e624 ths
1892 13eb76e0 bellard
    while (len > 0) {
1893 13eb76e0 bellard
        page = addr & TARGET_PAGE_MASK;
1894 13eb76e0 bellard
        l = (page + TARGET_PAGE_SIZE) - addr;
1895 13eb76e0 bellard
        if (l > len)
1896 13eb76e0 bellard
            l = len;
1897 ac1970fb Avi Kivity
        section = phys_page_find(d, page >> TARGET_PAGE_BITS);
1898 3b46e624 ths
1899 13eb76e0 bellard
        if (is_write) {
1900 f3705d53 Avi Kivity
            if (!memory_region_is_ram(section->mr)) {
1901 a8170e5e Avi Kivity
                hwaddr addr1;
1902 cc5bea60 Blue Swirl
                addr1 = memory_region_section_addr(section, addr);
1903 6a00d601 bellard
                /* XXX: could force cpu_single_env to NULL to avoid
1904 6a00d601 bellard
                   potential bugs */
1905 6c2934db aurel32
                if (l >= 4 && ((addr1 & 3) == 0)) {
1906 1c213d19 bellard
                    /* 32 bit write access */
1907 c27004ec bellard
                    val = ldl_p(buf);
1908 37ec01d4 Avi Kivity
                    io_mem_write(section->mr, addr1, val, 4);
1909 13eb76e0 bellard
                    l = 4;
1910 6c2934db aurel32
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
1911 1c213d19 bellard
                    /* 16 bit write access */
1912 c27004ec bellard
                    val = lduw_p(buf);
1913 37ec01d4 Avi Kivity
                    io_mem_write(section->mr, addr1, val, 2);
1914 13eb76e0 bellard
                    l = 2;
1915 13eb76e0 bellard
                } else {
1916 1c213d19 bellard
                    /* 8 bit write access */
1917 c27004ec bellard
                    val = ldub_p(buf);
1918 37ec01d4 Avi Kivity
                    io_mem_write(section->mr, addr1, val, 1);
1919 13eb76e0 bellard
                    l = 1;
1920 13eb76e0 bellard
                }
1921 f3705d53 Avi Kivity
            } else if (!section->readonly) {
1922 8ca5692d Anthony PERARD
                ram_addr_t addr1;
1923 f3705d53 Avi Kivity
                addr1 = memory_region_get_ram_addr(section->mr)
1924 cc5bea60 Blue Swirl
                    + memory_region_section_addr(section, addr);
1925 13eb76e0 bellard
                /* RAM case */
1926 5579c7f3 pbrook
                ptr = qemu_get_ram_ptr(addr1);
1927 13eb76e0 bellard
                memcpy(ptr, buf, l);
1928 51d7a9eb Anthony PERARD
                invalidate_and_set_dirty(addr1, l);
1929 050a0ddf Anthony PERARD
                qemu_put_ram_ptr(ptr);
1930 13eb76e0 bellard
            }
1931 13eb76e0 bellard
        } else {
1932 cc5bea60 Blue Swirl
            if (!(memory_region_is_ram(section->mr) ||
1933 cc5bea60 Blue Swirl
                  memory_region_is_romd(section->mr))) {
1934 a8170e5e Avi Kivity
                hwaddr addr1;
1935 13eb76e0 bellard
                /* I/O case */
1936 cc5bea60 Blue Swirl
                addr1 = memory_region_section_addr(section, addr);
1937 6c2934db aurel32
                if (l >= 4 && ((addr1 & 3) == 0)) {
1938 13eb76e0 bellard
                    /* 32 bit read access */
1939 37ec01d4 Avi Kivity
                    val = io_mem_read(section->mr, addr1, 4);
1940 c27004ec bellard
                    stl_p(buf, val);
1941 13eb76e0 bellard
                    l = 4;
1942 6c2934db aurel32
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
1943 13eb76e0 bellard
                    /* 16 bit read access */
1944 37ec01d4 Avi Kivity
                    val = io_mem_read(section->mr, addr1, 2);
1945 c27004ec bellard
                    stw_p(buf, val);
1946 13eb76e0 bellard
                    l = 2;
1947 13eb76e0 bellard
                } else {
1948 1c213d19 bellard
                    /* 8 bit read access */
1949 37ec01d4 Avi Kivity
                    val = io_mem_read(section->mr, addr1, 1);
1950 c27004ec bellard
                    stb_p(buf, val);
1951 13eb76e0 bellard
                    l = 1;
1952 13eb76e0 bellard
                }
1953 13eb76e0 bellard
            } else {
1954 13eb76e0 bellard
                /* RAM case */
1955 0a1b357f Anthony PERARD
                ptr = qemu_get_ram_ptr(section->mr->ram_addr
1956 cc5bea60 Blue Swirl
                                       + memory_region_section_addr(section,
1957 cc5bea60 Blue Swirl
                                                                    addr));
1958 f3705d53 Avi Kivity
                memcpy(buf, ptr, l);
1959 050a0ddf Anthony PERARD
                qemu_put_ram_ptr(ptr);
1960 13eb76e0 bellard
            }
1961 13eb76e0 bellard
        }
1962 13eb76e0 bellard
        len -= l;
1963 13eb76e0 bellard
        buf += l;
1964 13eb76e0 bellard
        addr += l;
1965 13eb76e0 bellard
    }
1966 13eb76e0 bellard
}

void address_space_write(AddressSpace *as, hwaddr addr,
                         const uint8_t *buf, int len)
{
    address_space_rw(as, addr, (uint8_t *)buf, len, true);
}

/**
 * address_space_read: read from an address space.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @buf: buffer with the data transferred
 * @len: length of the data transferred
 */
void address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
{
    address_space_rw(as, addr, buf, len, false);
}


void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    return address_space_rw(&address_space_memory, addr, buf, len, is_write);
}
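
/* Illustrative sketch (not from this file): reading a 32-bit little-endian
 * value from guest physical memory through the generic path:
 *
 *     uint8_t buf[4];
 *     cpu_physical_memory_rw(gpa, buf, sizeof(buf), 0);
 *     uint32_t val = ldl_le_p(buf);
 *
 * Dedicated helpers such as ldl_phys() further down avoid the extra copy.
 */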

/* used for ROM loading: can write in RAM and ROM */
void cpu_physical_memory_write_rom(hwaddr addr,
                                   const uint8_t *buf, int len)
{
    AddressSpaceDispatch *d = address_space_memory.dispatch;
    int l;
    uint8_t *ptr;
    hwaddr page;
    MemoryRegionSection *section;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(d, page >> TARGET_PAGE_BITS);

        if (!(memory_region_is_ram(section->mr) ||
              memory_region_is_romd(section->mr))) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = memory_region_get_ram_addr(section->mr)
                + memory_region_section_addr(section, addr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            invalidate_and_set_dirty(addr1, l);
            qemu_put_ram_ptr(ptr);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
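
/* Illustrative sketch (not from this file): firmware loaders use this entry
 * point to install images into regions that reject ordinary writes, e.g.
 *
 *     cpu_physical_memory_write_rom(bios_base, blob, blob_size);
 */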

typedef struct {
    void *buffer;
    hwaddr addr;
    hwaddr len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

static void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
2075 6d16c2f8 aliguori
/* Map a physical memory region into a host virtual address.
2076 6d16c2f8 aliguori
 * May map a subset of the requested range, given by and returned in *plen.
2077 6d16c2f8 aliguori
 * May return NULL if resources needed to perform the mapping are exhausted.
2078 6d16c2f8 aliguori
 * Use only for reads OR writes - not for read-modify-write operations.
2079 ba223c29 aliguori
 * Use cpu_register_map_client() to know when retrying the map operation is
2080 ba223c29 aliguori
 * likely to succeed.
2081 6d16c2f8 aliguori
 */
2082 ac1970fb Avi Kivity
void *address_space_map(AddressSpace *as,
2083 a8170e5e Avi Kivity
                        hwaddr addr,
2084 a8170e5e Avi Kivity
                        hwaddr *plen,
2085 ac1970fb Avi Kivity
                        bool is_write)
2086 6d16c2f8 aliguori
{
2087 ac1970fb Avi Kivity
    AddressSpaceDispatch *d = as->dispatch;
2088 a8170e5e Avi Kivity
    hwaddr len = *plen;
2089 a8170e5e Avi Kivity
    hwaddr todo = 0;
2090 6d16c2f8 aliguori
    int l;
2091 a8170e5e Avi Kivity
    hwaddr page;
2092 f3705d53 Avi Kivity
    MemoryRegionSection *section;
2093 f15fbc4b Anthony PERARD
    ram_addr_t raddr = RAM_ADDR_MAX;
2094 8ab934f9 Stefano Stabellini
    ram_addr_t rlen;
2095 8ab934f9 Stefano Stabellini
    void *ret;
2096 6d16c2f8 aliguori
2097 6d16c2f8 aliguori
    while (len > 0) {
2098 6d16c2f8 aliguori
        page = addr & TARGET_PAGE_MASK;
2099 6d16c2f8 aliguori
        l = (page + TARGET_PAGE_SIZE) - addr;
2100 6d16c2f8 aliguori
        if (l > len)
2101 6d16c2f8 aliguori
            l = len;
2102 ac1970fb Avi Kivity
        section = phys_page_find(d, page >> TARGET_PAGE_BITS);
2103 6d16c2f8 aliguori
2104 f3705d53 Avi Kivity
        if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
2105 38bee5dc Stefano Stabellini
            if (todo || bounce.buffer) {
2106 6d16c2f8 aliguori
                break;
2107 6d16c2f8 aliguori
            }
2108 6d16c2f8 aliguori
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
2109 6d16c2f8 aliguori
            bounce.addr = addr;
2110 6d16c2f8 aliguori
            bounce.len = l;
2111 6d16c2f8 aliguori
            if (!is_write) {
2112 ac1970fb Avi Kivity
                address_space_read(as, addr, bounce.buffer, l);
2113 6d16c2f8 aliguori
            }
2114 38bee5dc Stefano Stabellini
2115 38bee5dc Stefano Stabellini
            *plen = l;
2116 38bee5dc Stefano Stabellini
            return bounce.buffer;
2117 6d16c2f8 aliguori
        }
2118 8ab934f9 Stefano Stabellini
        if (!todo) {
2119 f3705d53 Avi Kivity
            raddr = memory_region_get_ram_addr(section->mr)
2120 cc5bea60 Blue Swirl
                + memory_region_section_addr(section, addr);
2121 8ab934f9 Stefano Stabellini
        }
2122 6d16c2f8 aliguori
2123 6d16c2f8 aliguori
        len -= l;
2124 6d16c2f8 aliguori
        addr += l;
2125 38bee5dc Stefano Stabellini
        todo += l;
2126 6d16c2f8 aliguori
    }
2127 8ab934f9 Stefano Stabellini
    rlen = todo;
2128 8ab934f9 Stefano Stabellini
    ret = qemu_ram_ptr_length(raddr, &rlen);
2129 8ab934f9 Stefano Stabellini
    *plen = rlen;
2130 8ab934f9 Stefano Stabellini
    return ret;
2131 6d16c2f8 aliguori
}
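
/* A minimal sketch of the contract above; example_set_phys_zero() is a
 * hypothetical helper, not QEMU API.  Because *plen may come back smaller
 * than requested (e.g. when the range crosses into a non-RAM section),
 * callers that need a whole range must loop over the mappings they are
 * handed.
 */
static G_GNUC_UNUSED void example_set_phys_zero(AddressSpace *as,
                                                hwaddr addr, hwaddr len)
{
    while (len > 0) {
        hwaddr plen = len;
        void *buf = address_space_map(as, addr, &plen, true);

        if (!buf) {
            /* resources exhausted; see cpu_register_map_client() above */
            break;
        }
        memset(buf, 0, plen);
        address_space_unmap(as, buf, plen, true, plen);
        addr += plen;
        len -= plen;
    }
}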

/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len) {
                    l = access_len;
                }
                invalidate_and_set_dirty(addr1, l);
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
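
/* Read-side sketch; example_sum_bytes() is hypothetical.  Mapping with
 * is_write == false and unmapping with is_write == 0 means no dirty
 * marking happens above; access_len reports how many bytes were actually
 * consumed, which matters on the bounce-buffer path where the data must
 * be written back for writes.
 */
static G_GNUC_UNUSED uint64_t example_sum_bytes(AddressSpace *as,
                                                hwaddr addr, hwaddr len)
{
    hwaddr plen = len;
    uint64_t sum = 0;
    uint8_t *buf = address_space_map(as, addr, &plen, false);

    if (buf) {
        hwaddr i;
        for (i = 0; i < plen; i++) {
            sum += buf[i];
        }
        address_space_unmap(as, buf, plen, false, plen);
    }
    return sum;
}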

void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    address_space_unmap(&address_space_memory, buffer, len,
                        is_write, access_len);
}

/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint32_t val;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);
        val = io_mem_read(section->mr, addr, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);

        /* XXX This is broken when device endian != cpu endian.
           Fix and add "endian" variable check */
#ifdef TARGET_WORDS_BIGENDIAN
        val = io_mem_read(section->mr, addr, 4) << 32;
        val |= io_mem_read(section->mr, addr + 4, 4);
#else
        val = io_mem_read(section->mr, addr, 4);
        val |= io_mem_read(section->mr, addr + 4, 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
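
/* Sketch of the fixed-endianness accessors; the descriptor layout and
 * example_read_le_desc() are hypothetical.  A guest structure defined as
 * little-endian reads the same way whatever TARGET_WORDS_BIGENDIAN says,
 * because the _le variants byte-swap exactly when the target's natural
 * order differs from the storage order.
 */
static G_GNUC_UNUSED void example_read_le_desc(hwaddr desc,
                                               uint64_t *buf_addr,
                                               uint32_t *buf_len)
{
    *buf_addr = ldq_le_phys(desc);       /* bytes 0..7, little-endian  */
    *buf_len  = ldl_le_phys(desc + 8);   /* bytes 8..11, little-endian */
}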

/* XXX: optimize */
uint32_t ldub_phys(hwaddr addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(hwaddr addr,
                                          enum device_endian endian)
{
    uint8_t *ptr;
    uint32_t val;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);
        val = io_mem_read(section->mr, addr, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(hwaddr addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
                               & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
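
/* Sketch of the intended use; example_pte_set_accessed() and the PTE bit
 * value are hypothetical.  An MMU helper that sets an accessed/dirty flag
 * in a guest page-table entry goes through stl_phys_notdirty() so that
 * TBs translated from the page holding the PTE are not flushed on every
 * walk, while the in_migration path above still keeps the dirty log
 * correct during live migration.
 */
static G_GNUC_UNUSED void example_pte_set_accessed(hwaddr pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    stl_phys_notdirty(pte_addr, pte | 0x20 /* hypothetical accessed bit */);
}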

void stq_phys_notdirty(hwaddr addr, uint64_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write(section->mr, addr, val >> 32, 4);
        io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
#else
        io_mem_write(section->mr, addr, (uint32_t)val, 4);
        io_mem_write(section->mr, addr + 4, val >> 32, 4);
#endif
    } else {
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
static inline void stl_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 4);
    }
}

void stl_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stb_phys(hwaddr addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 2);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 2);
    }
}

void stw_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(hwaddr addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1) {
            return -1;
        }
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        } else {
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
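
/* Sketch of a caller; example_peek_guest() is hypothetical.  A debug
 * front end such as the gdbstub reads guest *virtual* memory through
 * cpu_memory_rw_debug(); translation goes via cpu_get_phys_page_debug(),
 * so it works even when the soft TLB has no entry for the page, and a
 * write can patch ROM (e.g. to plant a breakpoint).
 */
static G_GNUC_UNUSED int example_peek_guest(CPUArchState *env,
                                            target_ulong vaddr,
                                            uint32_t *out)
{
    return cpu_memory_rw_debug(env, vaddr, (uint8_t *)out,
                               sizeof(*out), 0);
}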

#endif

#if !defined(CONFIG_USER_ONLY)

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#endif

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             phys_addr >> TARGET_PAGE_BITS);

    return !(memory_region_is_ram(section->mr) ||
             memory_region_is_romd(section->mr));
}
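
/* Sketch of a caller; example_page_is_dumpable() is hypothetical.  A
 * physical-memory dumper can use the predicate above to skip device-backed
 * pages that cannot safely be read as plain RAM.
 */
static G_GNUC_UNUSED bool example_page_is_dumpable(hwaddr phys_addr)
{
    return !cpu_physical_memory_is_io(phys_addr & TARGET_PAGE_MASK);
}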

#endif