/*
 *  Virtual page mapping
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "hw/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"

//#define DEBUG_UNASSIGNED
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;
DMAContext dma_context_memory;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

static void io_mem_init(void);
static void memory_map_init(void);
static void *qemu_safe_ram_ptr(ram_addr_t addr);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}
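
/* Implementation note on the allocator above: phys_map_nodes is a growable
 * array of L2_SIZE-entry node tables addressed by a uint16_t index rather
 * than by pointer, which keeps PhysPageEntry compact.  PHYS_MAP_NODE_NIL
 * (0x7fff) is the "no node" sentinel, bounding the map at 0x7fff nodes.
 * Nodes are never freed individually: phys_map_nodes_reset() below just
 * rewinds the counter when the whole map is torn down and rebuilt.
 */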

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}


static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr index)
{
    PhysPageEntry lp = d->phys_map;
    PhysPageEntry *p;
    int i;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.ptr;
not_found:
    return &phys_sections[s_index];
}
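
/* Worked example of the lookup above, assuming for illustration that
 * L2_BITS is 10 (L2_SIZE == 1024) and P_L2_LEVELS is 2: for page index
 * 0x12345, level 1 selects root slot (0x12345 >> 10) & 0x3ff == 0x48,
 * and level 0 selects slot 0x12345 & 0x3ff == 0x345 in that node, whose
 * ptr is then an index into phys_sections[].  Any hole in the tree falls
 * through to phys_section_unassigned, so lookups never fail outright.
 */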

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUArchState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUArchState),
        VMSTATE_UINT32(interrupt_request, CPUArchState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUArchState *qemu_get_cpu(int cpu)
{
    CPUArchState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUArchState *env)
{
#ifndef CONFIG_USER_ONLY
    CPUState *cpu = ENV_GET_CPU(env);
#endif
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
            (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)

{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}
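
/* The power-of-two restriction is what makes len_mask work: for len == 4
 * at addr 0x1000, len_mask is ~3, and an access hits the watchpoint when
 * its address matches wp->vaddr under that mask.  A 4-byte watchpoint at
 * 0x1001 is rejected by the (addr & ~len_mask) test above because the
 * watched range would straddle the alignment boundary.
 */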

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUArchState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

void cpu_reset_interrupt(CPUArchState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUArchState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

CPUArchState *cpu_copy(CPUArchState *env)
{
    CPUArchState *new_env = cpu_init(env->cpu_model_str);
    CPUArchState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUArchState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}
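
/* Caution when touching cpu_copy(): the QTAILQ_INIT calls above
 * re-initialize the *source* env's breakpoint and watchpoint heads, and
 * the QTAILQ_FOREACH loops then iterate those freshly emptied lists, so
 * as written they appear to clone nothing; verify this before relying on
 * break/watchpoints surviving a fork in user-mode emulation.
 */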

#if !defined(CONFIG_USER_ONLY)
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    uintptr_t start1;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }
    cpu_tlb_reset_dirty_all(start1, length);

}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}

static int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                                   MemoryRegionSection *section,
                                                   target_ulong vaddr,
                                                   hwaddr paddr,
                                                   int prot,
                                                   target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, paddr);
        if (!section->readonly) {
            iotlb |= phys_section_notdirty;
        } else {
            iotlb |= phys_section_rom;
        }
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = section - phys_sections;
        iotlb += memory_region_section_addr(section, paddr);
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = phys_section_watch + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */
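
/* The iotlb value built above is deliberately overloaded: for RAM pages it
 * is the page's ram_addr_t with a special section index (notdirty or rom)
 * folded into the low bits, while for MMIO it is the section's index in
 * phys_sections[] plus the offset within the section.  The TLB fill path
 * (see cputlb) decodes the two forms; the watchpoint case overrides either
 * one so the access traps through io_mem_watch.
 */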

#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(hwaddr base);
static void destroy_page_desc(uint16_t section_index)
{
    MemoryRegionSection *section = &phys_sections[section_index];
    MemoryRegion *mr = section->mr;

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
{
    unsigned i;
    PhysPageEntry *p;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = phys_map_nodes[lp->ptr];
    for (i = 0; i < L2_SIZE; ++i) {
        if (!p[i].is_leaf) {
            destroy_l2_mapping(&p[i], level - 1);
        } else {
            destroy_page_desc(p[i].ptr);
        }
    }
    lp->is_leaf = 0;
    lp->ptr = PHYS_MAP_NODE_NIL;
}

static void destroy_all_mappings(AddressSpaceDispatch *d)
{
    destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
    phys_map_nodes_reset();
}

static uint16_t phys_section_add(MemoryRegionSection *section)
{
    if (phys_sections_nb == phys_sections_nb_alloc) {
        phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
        phys_sections = g_renew(MemoryRegionSection, phys_sections,
                                phys_sections_nb_alloc);
    }
    phys_sections[phys_sections_nb] = *section;
    return phys_sections_nb++;
}

static void phys_sections_clear(void)
{
    phys_sections_nb = 0;
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = TARGET_PAGE_SIZE,
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + section->size - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}


static void register_multipage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    hwaddr addr;
    uint16_t section_index = phys_section_add(section);

    assert(size);

    addr = start_addr;
    phys_page_set(d, addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
                  section_index);
}

static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
    MemoryRegionSection now = *section, remain = *section;

    if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
        || (now.size < TARGET_PAGE_SIZE)) {
        now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space,
                       now.size);
        register_subpage(d, &now);
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    while (remain.size >= TARGET_PAGE_SIZE) {
        now = remain;
        if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
            now.size = TARGET_PAGE_SIZE;
            register_subpage(d, &now);
        } else {
            now.size &= TARGET_PAGE_MASK;
            register_multipage(d, &now);
        }
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    now = remain;
    if (now.size) {
        register_subpage(d, &now);
    }
}
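
/* Example of the splitting done by mem_add(), assuming 4 KiB target pages
 * and a page-aligned offset_within_region after the head fixup: a section
 * covering [0x1800, 0x5000) becomes a subpage entry for [0x1800, 0x2000)
 * followed by a multipage entry for [0x2000, 0x5000).  A section entirely
 * inside one page, e.g. [0x1800, 0x1c00), is registered as a single
 * subpage; subpages let several regions share one target page, dispatched
 * by SUBPAGE_IDX(addr).
 */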

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }
    unlink(filename);
    free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return (NULL);
    }
    block->fd = fd;
    return area;
}
#endif
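
/* The hugetlbfs path above is reached when the user passes something like
 * "-mem-path /dev/hugepages" (which sets mem_path): gethugepagesize()
 * returns the filesystem block size, which on hugetlbfs is the huge page
 * size, and file_ram_alloc() rounds the block up to a multiple of it and
 * mmap()s an unlinked temporary file so the pages vanish on exit.
 */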

static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    if (QTAILQ_EMPTY(&ram_list.blocks))
        return 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}
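
/* find_ram_offset() is a best-fit search: for each block it measures the
 * gap up to the nearest block that starts at or after its end, and keeps
 * the smallest gap that still fits.  E.g. with blocks at [0, 0x1000) and
 * [0x3000, 0x4000), a request for 0x1000 bytes returns offset 0x1000,
 * reusing the 0x2000-byte hole instead of growing past 0x4000.
 */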

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;
    QemuOpts *machine_opts;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (machine_opts &&
        !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                            "but dump_guest_core=off specified\n");
        }
    }
}

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    qemu_mutex_unlock_ramlist();
}
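
/* The resulting idstr is "<qdev path>/<name>" when a device is supplied
 * (e.g. something like "0000:00:02.0/vga.vram") and plain "<name>" such
 * as "pc.ram" otherwise.  Migration matches RAM blocks between source and
 * destination hosts by this string, which is why duplicates abort here.
 */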

static int memory_try_enable_merging(void *addr, size_t len)
{
    QemuOpts *opts;

    opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *block, *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_vmalloc(size);
                memory_try_enable_merging(new_block->host, size);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else if (kvm_enabled()) {
                /* some s390/kvm configurations have special constraints */
                new_block->host = kvm_vmalloc(size);
            } else {
                new_block->host = qemu_vmalloc(size);
            }
            memory_try_enable_merging(new_block->host, size);
        }
    }
    new_block->length = size;

    /* Keep the list sorted from biggest to smallest block.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->length < new_block->length) {
            break;
        }
    }
    if (block) {
        QTAILQ_INSERT_BEFORE(block, new_block, next);
    } else {
        QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                       last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0, size >> TARGET_PAGE_BITS);
    cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);

    qemu_ram_setup_dump(new_block->host, size);
    qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}
1092 e9a1ab19 bellard
1093 c5705a77 Avi Kivity
ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
1094 6977dfe6 Yoshiaki Tamura
{
1095 c5705a77 Avi Kivity
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
1096 6977dfe6 Yoshiaki Tamura
}
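
/* Illustrative sketch (not part of this file): callers normally reach
 * qemu_ram_alloc() indirectly through the memory API when creating a
 * RAM-backed region; "board.ram" and ram_size below are hypothetical:
 *
 *     MemoryRegion *ram = g_malloc(sizeof(*ram));
 *     memory_region_init_ram(ram, "board.ram", ram_size);
 *     memory_region_add_subregion(get_system_memory(), 0, ram);
 */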

void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_vfree(block->host);
                }
#else
                abort();
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_vfree(block->host);
                }
#endif
            }
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();

}

#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */

/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
        goto found;
    }
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    ram_list.mru_block = block;
    if (xen_enabled()) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        } else if (block->host == NULL) {
            block->host =
                xen_map_cache(block->offset, block->length, 1);
        }
    }
    return block->host + (addr - block->offset);
}
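
/* Usage sketch (illustrative, not from this file): a framebuffer-style
 * device that owns its RAM block might render directly through the
 * returned pointer; "vram_offset" and the geometry are hypothetical:
 *
 *     uint8_t *vram = qemu_get_ram_ptr(vram_offset);
 *     vram[y * pitch + x] = pixel;
 *
 * General-purpose DMA must go through cpu_physical_memory_map()/
 * cpu_physical_memory_rw() instead, as the comment above explains.
 */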

/* Return a host pointer to ram allocated with qemu_ram_alloc.  Same as
 * qemu_get_ram_ptr but do not touch ram_list.mru_block.
 *
 * ??? Is this still necessary?
 */
static void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QTAILQ_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}

void qemu_put_ram_ptr(void *addr)
{
    trace_qemu_put_ram_ptr(addr);
}

int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return 0;
    }

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        /* This case happens when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }

    return -1;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}
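
/* Illustrative sketch: the reverse translation, e.g. for a host pointer
 * obtained earlier from qemu_get_ram_ptr() ("host_ptr" is hypothetical):
 *
 *     ram_addr_t ra;
 *     if (qemu_ram_addr_from_host(host_ptr, &ra) == 0) {
 *         ...  ra now holds the ram_addr_t offset of host_ptr ...
 *     }
 *
 * The _nofail variant above is for callers that know the pointer must
 * lie inside some RAM block (e.g. when it comes from a TLB entry).
 */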

static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
#endif
    return 0;
}

static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
#endif
}

static const MemoryRegionOps unassigned_mem_ops = {
    .read = unassigned_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
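
/* Illustrative sketch: device emulation registers MMIO callbacks of the
 * same shape through its own MemoryRegionOps.  "mydev" and its state
 * structure "s" are hypothetical:
 *
 *     static const MemoryRegionOps mydev_ops = {
 *         .read = mydev_read,
 *         .write = mydev_write,
 *         .endianness = DEVICE_NATIVE_ENDIAN,
 *     };
 *     memory_region_init_io(&s->iomem, &mydev_ops, s, "mydev", 0x1000);
 *     memory_region_add_subregion(get_system_memory(), base, &s->iomem);
 */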

static uint64_t error_mem_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    abort();
}

static void error_mem_write(void *opaque, hwaddr addr,
                            uint64_t value, unsigned size)
{
    abort();
}

static const MemoryRegionOps error_mem_ops = {
    .read = error_mem_read,
    .write = error_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static const MemoryRegionOps rom_mem_ops = {
    .read = error_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, size);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static const MemoryRegionOps notdirty_mem_ops = {
    .read = error_mem_read,
    .write = notdirty_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
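
/* Summary of the notdirty path (descriptive note, not from the original
 * file): a store to a page whose CODE_DIRTY_FLAG is still clear traps
 * here instead of going straight to RAM.  The handler invalidates any
 * TBs translated from that page, performs the store by hand, marks the
 * page dirty, and, once all dirty flags are set (0xff), flips the TLB
 * entry back to a plain RAM mapping so later stores take the fast path.
 */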

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUArchState *env = cpu_single_env;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb_check_watchpoint(env);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(env);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(env, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint64_t watch_mem_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
    switch (size) {
    case 1: return ldub_phys(addr);
    case 2: return lduw_phys(addr);
    case 4: return ldl_phys(addr);
    default: abort();
    }
}

static void watch_mem_write(void *opaque, hwaddr addr,
                            uint64_t val, unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
    switch (size) {
    case 1:
        stb_phys(addr, val);
        break;
    case 2:
        stw_phys(addr, val);
        break;
    case 4:
        stl_phys(addr, val);
        break;
    default: abort();
    }
}

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
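
/* Illustrative sketch: watchpoints are armed per-CPU, e.g. from gdbstub
 * code (the arguments here are hypothetical):
 *
 *     cpu_watchpoint_insert(env, vaddr, len, BP_MEM_WRITE, NULL);
 *
 * The TLB then routes accesses to the watched page through
 * watch_mem_read()/watch_mem_write() above, which call
 * check_watchpoint() before completing the access.
 */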

static uint64_t subpage_read(void *opaque, hwaddr addr,
                             unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr += mmio->base;
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    return io_mem_read(section->mr, addr, len);
}

static void subpage_write(void *opaque, hwaddr addr,
                          uint64_t value, unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx
           " idx %d value %"PRIx64"\n",
           __func__, mmio, len, addr, idx, value);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr += mmio->base;
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    io_mem_write(section->mr, addr, value, len);
}

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t subpage_ram_read(void *opaque, hwaddr addr,
                                 unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return ldub_p(ptr);
    case 2: return lduw_p(ptr);
    case 4: return ldl_p(ptr);
    default: abort();
    }
}

static void subpage_ram_write(void *opaque, hwaddr addr,
                              uint64_t value, unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return stb_p(ptr, value);
    case 2: return stw_p(ptr, value);
    case 4: return stl_p(ptr, value);
    default: abort();
    }
}

static const MemoryRegionOps subpage_ram_ops = {
    .read = subpage_ram_read,
    .write = subpage_ram_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
1620 db7b5426 blueswir1
           mmio, start, end, idx, eidx, memory);
#endif
    if (memory_region_is_ram(phys_sections[section].mr)) {
        MemoryRegionSection new_section = phys_sections[section];
        new_section.mr = &io_mem_subpage_ram;
        section = phys_section_add(&new_section);
    }
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}
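
/* Worked example (descriptive note, not from the original file),
 * assuming SUBPAGE_IDX() indexes at byte granularity within the page:
 * registering a region covering bytes 0x40..0x13f of a page fills
 * mmio->sub_section[0x40] through [0x13f] with the given section
 * number; accesses falling in that window are then forwarded by
 * subpage_read()/subpage_write() to the owning MemoryRegion.
 */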

static subpage_t *subpage_init(hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->base = base;
    memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
                          "subpage", TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
1646 1eec614b aliguori
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);

    return mmio;
}

static uint16_t dummy_section(MemoryRegion *mr)
{
    MemoryRegionSection section = {
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = UINT64_MAX,
    };

    return phys_section_add(&section);
}

MemoryRegion *iotlb_to_region(hwaddr index)
{
    return phys_sections[index & ~TARGET_PAGE_MASK].mr;
}

static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
    memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
                          "unassigned", UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
                          "notdirty", UINT64_MAX);
    memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
                          "subpage-ram", UINT64_MAX);
    memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
                          "watch", UINT64_MAX);
}

static void mem_begin(MemoryListener *listener)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);

    destroy_all_mappings(d);
    d->phys_map.ptr = PHYS_MAP_NODE_NIL;
}

static void core_begin(MemoryListener *listener)
{
    phys_sections_clear();
    phys_section_unassigned = dummy_section(&io_mem_unassigned);
    phys_section_notdirty = dummy_section(&io_mem_notdirty);
    phys_section_rom = dummy_section(&io_mem_rom);
    phys_section_watch = dummy_section(&io_mem_watch);
}

static void tcg_commit(MemoryListener *listener)
{
    CPUArchState *env;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}

static void core_log_global_start(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(1);
}

static void core_log_global_stop(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(0);
}

static void io_region_add(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);

    mrio->mr = section->mr;
    mrio->offset = section->offset_within_region;
    iorange_init(&mrio->iorange, &memory_region_iorange_ops,
                 section->offset_within_address_space, section->size);
    ioport_register(&mrio->iorange);
}

static void io_region_del(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    isa_unassign_ioport(section->offset_within_address_space, section->size);
}

static MemoryListener core_memory_listener = {
    .begin = core_begin,
    .log_global_start = core_log_global_start,
    .log_global_stop = core_log_global_stop,
    .priority = 1,
};

static MemoryListener io_memory_listener = {
    .region_add = io_region_add,
    .region_del = io_region_del,
    .priority = 0,
};

static MemoryListener tcg_memory_listener = {
    .commit = tcg_commit,
};

void address_space_init_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);

    d->phys_map  = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
    d->listener = (MemoryListener) {
        .begin = mem_begin,
        .region_add = mem_add,
        .region_nop = mem_add,
        .priority = 0,
    };
    as->dispatch = d;
    memory_listener_register(&d->listener, as);
}

void address_space_destroy_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = as->dispatch;

    memory_listener_unregister(&d->listener);
    destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
    g_free(d);
    as->dispatch = NULL;
}

static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));
    memory_region_init(system_memory, "system", INT64_MAX);
    address_space_init(&address_space_memory, system_memory);
    address_space_memory.name = "memory";

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init(system_io, "io", 65536);
    address_space_init(&address_space_io, system_io);
    address_space_io.name = "I/O";

    memory_listener_register(&core_memory_listener, &address_space_memory);
    memory_listener_register(&io_memory_listener, &address_space_io);
    memory_listener_register(&tcg_memory_listener, &address_space_memory);

    dma_context_init(&dma_context_memory, &address_space_memory,
                     NULL, NULL, NULL);
}
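
/* Illustrative sketch (hypothetical device code): once the flat memory
 * map exists, board and device models populate it through the memory
 * API rather than calling into this file directly, e.g.:
 *
 *     memory_region_add_subregion(get_system_memory(), 0xfe000000,
 *                                 &s->iomem);
 *
 * The listeners registered above then rebuild the dispatch tables that
 * address_space_rw() and friends consult.
 */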

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else

static void invalidate_and_set_dirty(hwaddr addr,
                                     hwaddr length)
{
    if (!cpu_physical_memory_is_dirty(addr)) {
        /* invalidate code */
        tb_invalidate_phys_page_range(addr, addr + length, 0);
        /* set dirty bit */
        cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
    }
    xen_modified_memory(addr, length);
}

void address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
                      int len, bool is_write)
{
    AddressSpaceDispatch *d = as->dispatch;
    int l;
    uint8_t *ptr;
    uint32_t val;
    hwaddr page;
    MemoryRegionSection *section;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(d, page >> TARGET_PAGE_BITS);

        if (is_write) {
            if (!memory_region_is_ram(section->mr)) {
                hwaddr addr1;
                addr1 = memory_region_section_addr(section, addr);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write(section->mr, addr1, val, 4);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write(section->mr, addr1, val, 2);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write(section->mr, addr1, val, 1);
                    l = 1;
                }
            } else if (!section->readonly) {
                ram_addr_t addr1;
                addr1 = memory_region_get_ram_addr(section->mr)
                    + memory_region_section_addr(section, addr);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(addr1, l);
                qemu_put_ram_ptr(ptr);
            }
        } else {
            if (!(memory_region_is_ram(section->mr) ||
                  memory_region_is_romd(section->mr))) {
                hwaddr addr1;
                /* I/O case */
                addr1 = memory_region_section_addr(section, addr);
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read(section->mr, addr1, 4);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read(section->mr, addr1, 2);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read(section->mr, addr1, 1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(section->mr->ram_addr
                                       + memory_region_section_addr(section,
                                                                    addr));
                memcpy(buf, ptr, l);
                qemu_put_ram_ptr(ptr);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
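
/* Illustrative sketch (hypothetical caller): a DMA-capable device model
 * would copy a guest buffer out of system memory like this:
 *
 *     uint8_t buf[64];
 *     address_space_rw(&address_space_memory, gpa, buf, sizeof(buf),
 *                      false);
 *
 * "gpa" is a hypothetical guest physical address; passing true instead
 * performs the write direction.
 */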

void address_space_write(AddressSpace *as, hwaddr addr,
                         const uint8_t *buf, int len)
{
    address_space_rw(as, addr, (uint8_t *)buf, len, true);
}

/**
 * address_space_read: read from an address space.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @buf: buffer with the data transferred
 * @len: length of the data transferred
 */
void address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
{
    address_space_rw(as, addr, buf, len, false);
}


void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    return address_space_rw(&address_space_memory, addr, buf, len, is_write);
}

/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(hwaddr addr,
                                   const uint8_t *buf, int len)
{
    AddressSpaceDispatch *d = address_space_memory.dispatch;
    int l;
    uint8_t *ptr;
    hwaddr page;
    MemoryRegionSection *section;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(d, page >> TARGET_PAGE_BITS);

        if (!(memory_region_is_ram(section->mr) ||
              memory_region_is_romd(section->mr))) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = memory_region_get_ram_addr(section->mr)
                + memory_region_section_addr(section, addr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            invalidate_and_set_dirty(addr1, l);
            qemu_put_ram_ptr(ptr);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
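
/* Illustrative sketch (hypothetical loader): firmware images are copied
 * in with the ROM-capable variant, since plain cpu_physical_memory_rw()
 * cannot write to ROM regions:
 *
 *     cpu_physical_memory_write_rom(rom_base, rom_data, rom_size);
 *
 * "rom_base", "rom_data" and "rom_size" are placeholders.
 */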
2016 d0ecd2aa bellard
2017 6d16c2f8 aliguori
typedef struct {
2018 6d16c2f8 aliguori
    void *buffer;
2019 a8170e5e Avi Kivity
    hwaddr addr;
2020 a8170e5e Avi Kivity
    hwaddr len;
2021 6d16c2f8 aliguori
} BounceBuffer;
2022 6d16c2f8 aliguori
2023 6d16c2f8 aliguori
static BounceBuffer bounce;
2024 6d16c2f8 aliguori
2025 ba223c29 aliguori
typedef struct MapClient {
2026 ba223c29 aliguori
    void *opaque;
2027 ba223c29 aliguori
    void (*callback)(void *opaque);
2028 72cf2d4f Blue Swirl
    QLIST_ENTRY(MapClient) link;
2029 ba223c29 aliguori
} MapClient;
2030 ba223c29 aliguori
2031 72cf2d4f Blue Swirl
static QLIST_HEAD(map_client_list, MapClient) map_client_list
2032 72cf2d4f Blue Swirl
    = QLIST_HEAD_INITIALIZER(map_client_list);
2033 ba223c29 aliguori
2034 ba223c29 aliguori
void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2035 ba223c29 aliguori
{
2036 7267c094 Anthony Liguori
    MapClient *client = g_malloc(sizeof(*client));
2037 ba223c29 aliguori
2038 ba223c29 aliguori
    client->opaque = opaque;
2039 ba223c29 aliguori
    client->callback = callback;
2040 72cf2d4f Blue Swirl
    QLIST_INSERT_HEAD(&map_client_list, client, link);
2041 ba223c29 aliguori
    return client;
2042 ba223c29 aliguori
}
2043 ba223c29 aliguori
2044 8b9c99d9 Blue Swirl
static void cpu_unregister_map_client(void *_client)
2045 ba223c29 aliguori
{
2046 ba223c29 aliguori
    MapClient *client = (MapClient *)_client;
2047 ba223c29 aliguori
2048 72cf2d4f Blue Swirl
    QLIST_REMOVE(client, link);
2049 7267c094 Anthony Liguori
    g_free(client);
2050 ba223c29 aliguori
}
2051 ba223c29 aliguori
2052 ba223c29 aliguori
static void cpu_notify_map_clients(void)
2053 ba223c29 aliguori
{
2054 ba223c29 aliguori
    MapClient *client;
2055 ba223c29 aliguori
2056 72cf2d4f Blue Swirl
    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    AddressSpaceDispatch *d = as->dispatch;
    hwaddr len = *plen;
    hwaddr todo = 0;
    int l;
    hwaddr page;
    MemoryRegionSection *section;
    ram_addr_t raddr = RAM_ADDR_MAX;
    ram_addr_t rlen;
    void *ret;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        section = phys_page_find(d, page >> TARGET_PAGE_BITS);

        if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
            /* Not directly mappable RAM; use the single bounce buffer
             * instead.  Stop here if it is already in use, or if part of
             * the range has already been mapped directly. */
            if (todo || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                address_space_read(as, addr, bounce.buffer, l);
            }

            *plen = l;
            return bounce.buffer;
        }
        if (!todo) {
            /* remember the start of the directly-mapped range */
            raddr = memory_region_get_ram_addr(section->mr)
                + memory_region_section_addr(section, addr);
        }

        len -= l;
        addr += l;
        todo += l;
    }
    rlen = todo;
    ret = qemu_ram_ptr_length(raddr, &rlen);
    *plen = rlen;
    return ret;
}

/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len) {
                    l = access_len;
                }
                invalidate_and_set_dirty(addr1, l);
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
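
/* Illustrative sketch (editor's addition, not part of QEMU): one plausible
 * map/use/unmap cycle against an address space.  example_read_region() and
 * its parameter names are hypothetical; only the address_space_map(),
 * address_space_unmap() and cpu_register_map_client() calls are real.
 * Kept under #if 0 so it is never compiled. */
#if 0
static void example_read_region(AddressSpace *as, hwaddr addr,
                                uint8_t *dst, hwaddr len)
{
    while (len > 0) {
        hwaddr plen = len;
        void *p = address_space_map(as, addr, &plen, false);

        if (!p) {
            /* Resources (the bounce buffer) are exhausted; a caller could
             * use cpu_register_map_client() to be notified when a retry is
             * likely to succeed, then bail out for now. */
            return;
        }
        /* Only *plen bytes were mapped, which may be less than requested. */
        memcpy(dst, p, plen);
        address_space_unmap(as, p, plen, false, plen);
        addr += plen;
        dst += plen;
        len -= plen;
    }
}
#endif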

void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    address_space_unmap(&address_space_memory, buffer, len,
                        is_write, access_len);
}

/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint32_t val;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);
        val = io_mem_read(section->mr, addr, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
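
/* Illustrative sketch (editor's addition, not part of QEMU): picking the
 * right accessor variant.  A descriptor that the guest stores in RAM in
 * little-endian byte order (much like a virtio ring descriptor) is read
 * with the _le_ variants, which return host-order values on both LE and
 * BE targets.  The struct and function names here are hypothetical.
 * Never compiled. */
#if 0
struct example_desc {
    uint64_t buf_pa;    /* guest-physical buffer address, LE in RAM */
    uint32_t len;       /* buffer length, LE in RAM */
    uint16_t flags;     /* descriptor flags, LE in RAM */
};

static void example_read_desc(hwaddr pa, struct example_desc *d)
{
    d->buf_pa = ldq_le_phys(pa);        /* naturally aligned 64-bit field */
    d->len    = ldl_le_phys(pa + 8);
    d->flags  = lduw_le_phys(pa + 12);
}
#endif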

/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);

        /* XXX This is broken when device endian != cpu endian.
               Fix and add "endian" variable check */
#ifdef TARGET_WORDS_BIGENDIAN
        val = io_mem_read(section->mr, addr, 4) << 32;
        val |= io_mem_read(section->mr, addr + 4, 4);
#else
        val = io_mem_read(section->mr, addr, 4);
        val |= io_mem_read(section->mr, addr + 4, 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
uint32_t ldub_phys(hwaddr addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(hwaddr addr,
                                          enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);
        val = io_mem_read(section->mr, addr, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(hwaddr addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
                               & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
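
/* Illustrative sketch (editor's addition, not part of QEMU): the kind of
 * caller stl_phys_notdirty() exists for.  A target MMU helper that sets
 * the accessed/dirty bits in a guest page table entry rewrites the PTE
 * without flagging the RAM page dirty, so those stores do not defeat the
 * use of the dirty bitmap for tracking guest-modified PTEs.  The names
 * example_update_pte() and PTE_ACCESSED/PTE_DIRTY are hypothetical.
 * Never compiled. */
#if 0
#define PTE_ACCESSED (1u << 5)
#define PTE_DIRTY    (1u << 6)

static void example_update_pte(hwaddr pte_addr, int is_write)
{
    uint32_t pte = ldl_phys(pte_addr);

    pte |= PTE_ACCESSED;
    if (is_write) {
        pte |= PTE_DIRTY;
    }
    /* Write the PTE back without dirtying the page or invalidating TBs. */
    stl_phys_notdirty(pte_addr, pte);
}
#endif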

void stq_phys_notdirty(hwaddr addr, uint64_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write(section->mr, addr, val >> 32, 4);
        io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
#else
        io_mem_write(section->mr, addr, (uint32_t)val, 4);
        io_mem_write(section->mr, addr + 4, val >> 32, 4);
#endif
    } else {
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
static inline void stl_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 4);
    }
}

void stl_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stb_phys(hwaddr addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 2);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 2);
    }
}

void stw_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(hwaddr addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1) {
            return -1;
        }
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        } else {
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
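
/* Illustrative sketch (editor's addition, not part of QEMU): how a debug
 * front end such as a gdbstub uses cpu_memory_rw_debug().  It walks guest
 * *virtual* addresses, so it works even for pages the TLB has not touched,
 * and a -1 return means some page in the range was unmapped.
 * example_dump_guest() is a hypothetical caller.  Never compiled. */
#if 0
static int example_dump_guest(CPUArchState *env, target_ulong vaddr,
                              size_t size)
{
    uint8_t buf[64];

    while (size > 0) {
        size_t chunk = size < sizeof(buf) ? size : sizeof(buf);

        if (cpu_memory_rw_debug(env, vaddr, buf, chunk, 0) < 0) {
            return -1;          /* no physical page mapped at vaddr */
        }
        /* ... print or store buf[0..chunk) ... */
        vaddr += chunk;
        size -= chunk;
    }
    return 0;
}
#endif
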
#endif

#if !defined(CONFIG_USER_ONLY)

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#endif

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             phys_addr >> TARGET_PAGE_BITS);

    return !(memory_region_is_ram(section->mr) ||
             memory_region_is_romd(section->mr));
}
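
/* Illustrative sketch (editor's addition, not part of QEMU): a plausible
 * use of cpu_physical_memory_is_io() is to let memory-dumping code skip
 * device regions, where reads could have side effects.
 * example_dump_page() is a hypothetical caller.  Never compiled. */
#if 0
static void example_dump_page(hwaddr paddr, uint8_t *out)
{
    if (cpu_physical_memory_is_io(paddr)) {
        /* MMIO: reading would touch a device, so record zeroes instead. */
        memset(out, 0, TARGET_PAGE_SIZE);
        return;
    }
    cpu_physical_memory_read(paddr, out, TARGET_PAGE_SIZE);
}
#endif
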
#endif