/*
 *  Virtual page mapping
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifndef _WIN32
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "qemu/cache-utils.h"

#include "qemu/range.h"

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
static bool in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *, current_cpu);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits to skip to the next level (in units of L2_SIZE).
     * 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

#define ADDR_SPACE_BITS 64

/* Size of the L2 (and L3, etc) page tables.  */
#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
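
/* Worked sizing example (illustrative; TARGET_PAGE_BITS is target-dependent,
 * 12 is the common 4KB-page value):
 *
 *     P_L2_LEVELS = ((64 - 12 - 1) / 9) + 1 = (51 / 9) + 1 = 6
 *
 * so the radix tree has six levels of P_L2_SIZE = 512 entries each;
 * 6 * 9 = 54 index bits comfortably cover the 64 - 12 = 52 bits of
 * page number.
 */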

typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map)
{
    unsigned i;
    uint32_t ret;

    ret = map->nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);
    for (i = 0; i < P_L2_SIZE; ++i) {
        map->nodes[ret][i].skip = 1;
        map->nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map);
        p = map->nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < P_L2_SIZE; i++) {
                p[i].skip = 0;
                p[i].ptr = PHYS_SECTION_UNASSIGNED;
            }
        }
    } else {
        p = map->nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
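
/* Example (illustrative, not from the original source): mapping a 16KB
 * region at physical address 0x10000 with 4KB pages comes down to
 *
 *     phys_page_set(d, 0x10000 >> TARGET_PAGE_BITS, 4, section_index);
 *
 * phys_page_set_level() descends the tree, writing the leaf directly into
 * any fully covered, aligned 'step'-sized slot and recursing one level
 * down for partially covered slots.
 */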

/* Compact a non-leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}
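
/* Illustration (not from the original source): if a 64-bit guest only maps
 * RAM in the low 4GB, every radix node above the populated subtree has
 * exactly one valid child.  Compaction folds such single-child chains by
 * adding the child's skip count to the parent's, so a later lookup hops
 * over the folded levels in one step instead of touching each node.
 */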

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}

static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}
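
/* Lookup sketch (illustrative, assuming TARGET_PAGE_BITS == 12 and
 * P_L2_LEVELS == 6): for addr = 0x1234567000 the page index is 0x1234567.
 * Each iteration consumes lp.skip levels at once (this is where the
 * compacted chains pay off), then picks the next 9-bit slice:
 *
 *     lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
 *
 * The walk stops when lp.skip == 0, at which point lp.ptr indexes the
 * sections array (or PHYS_SECTION_UNASSIGNED on a miss).
 */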

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    return section;
}

static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (memory_region_is_ram(mr)) {
        return !(is_write && mr->readonly);
    }
    if (memory_region_is_romd(mr)) {
        return !is_write;
    }

    return false;
}

MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    hwaddr len = *plen;

    for (;;) {
        section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        len = MIN(page, len);
    }

    *plen = len;
    *xlat = addr;
    return mr;
}
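
/* Typical caller pattern (an illustrative sketch, not code from this file;
 * the real users are the address_space_rw()/address_space_map() family
 * defined later in exec.c):
 *
 *     hwaddr xlat, l = len;
 *     MemoryRegion *mr = address_space_translate(as, addr, &xlat, &l, false);
 *     if (memory_access_is_direct(mr, false)) {
 *         memcpy(buf, qemu_get_ram_ptr(mr->ram_addr + xlat), l);
 *     } else {
 *         io_mem_read(mr, xlat, &val, l);
 *     }
 *
 * Because the IOMMU loop above can switch 'as', translation may terminate
 * in a different address space than the one the caller started from.
 */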

MemoryRegionSection *
address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
                                  hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu->env_ptr, 1);

    return 0;
}

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
{
    /* We only support one address space per cpu at the moment.  */
    assert(cpu->as == as);

    if (cpu->tcg_as_listener) {
        memory_listener_unregister(cpu->tcg_as_listener);
    } else {
        cpu->tcg_as_listener = g_new0(MemoryListener, 1);
    }
    cpu->tcg_as_listener->commit = tcg_commit;
    memory_listener_register(cpu->tcg_as_listener, as);
}
#endif

void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUState *some_cpu;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = 0;
    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->as = &address_space_memory;
    cpu->thread_id = qemu_get_thread_id();
#endif
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
    }

    tlb_flush_page(env, addr);

    if (watchpoint) {
        *watchpoint = wp;
    }
    return 0;
}
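
/* Example (illustrative): the gdbstub arms a 4-byte write watchpoint at
 * guest virtual address 0x1000 with
 *
 *     CPUWatchpoint *wp;
 *     cpu_watchpoint_insert(env, 0x1000, 4, BP_MEM_WRITE | BP_GDB, &wp);
 *
 * The tlb_flush_page() above forces the watched page back through the
 * slow path, which raises EXCP_DEBUG when the range is written.
 */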

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(env, wp);
        }
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
    }

    breakpoint_invalidate(ENV_GET_CPU(env), pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(ENV_GET_CPU(env), breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(env, bp);
        }
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            CPUArchState *env = cpu->env_ptr;
            tb_flush(env);
        }
    }
#endif
}

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    CPUState *cpu = ENV_GET_CPU(env);
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
        goto found;
    }
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)block->host + (start - block->offset);
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
                                     unsigned client)
{
    if (length == 0) {
        return;
    }
    cpu_physical_memory_clear_dirty_range(start, length, client);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }
}
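
/* Usage note (illustrative): migration and VGA dirty logging first harvest
 * the dirty bitmap, then call
 *
 *     cpu_physical_memory_reset_dirty(start, length, DIRTY_MEMORY_MIGRATION);
 *
 * which clears that client's dirty bits and, under TCG, re-arms the
 * softmmu TLB so the next guest write to the range dirties it again.
 */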

static void cpu_physical_memory_set_dirty_tracking(bool enable)
{
    in_migration = enable;
}

hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        iotlb = section - section->address_space->dispatch->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
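
/* Reader's note (a summary of the code above, nothing extra): the iotlb
 * value is overloaded.  For RAM it is the page-aligned ram_addr ORed with
 * the PHYS_SECTION_NOTDIRTY or PHYS_SECTION_ROM pseudo-section, so writes
 * can be trapped for dirty tracking or blocked; for MMIO it is the
 * section's index in the dispatch map plus the in-page offset; pages with
 * a matching watchpoint are forced to PHYS_SECTION_WATCH so every access
 * takes the trap path.
 */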
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size) = qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t))
{
    phys_mem_alloc = alloc;
}

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    memory_region_unref(mr);

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}


static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}
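
/* Worked example for mem_add() (illustrative, assuming 4KB target pages):
 * a section covering [0x1800, 0x5000) is split into
 *
 *     [0x1800, 0x2000)  unaligned head -> register_subpage()
 *     [0x2000, 0x5000)  whole pages    -> register_multipage()
 *
 * A section ending mid-page would likewise have its sub-page tail handed
 * to register_subpage().
 */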
969 0f0cb164 Avi Kivity
970 62a2744c Sheng Yang
void qemu_flush_coalesced_mmio_buffer(void)
971 62a2744c Sheng Yang
{
972 62a2744c Sheng Yang
    if (kvm_enabled())
973 62a2744c Sheng Yang
        kvm_flush_coalesced_mmio_buffer();
974 62a2744c Sheng Yang
}
975 62a2744c Sheng Yang
976 b2a8658e Umesh Deshpande
void qemu_mutex_lock_ramlist(void)
977 b2a8658e Umesh Deshpande
{
978 b2a8658e Umesh Deshpande
    qemu_mutex_lock(&ram_list.mutex);
979 b2a8658e Umesh Deshpande
}
980 b2a8658e Umesh Deshpande
981 b2a8658e Umesh Deshpande
void qemu_mutex_unlock_ramlist(void)
982 b2a8658e Umesh Deshpande
{
983 b2a8658e Umesh Deshpande
    qemu_mutex_unlock(&ram_list.mutex);
984 b2a8658e Umesh Deshpande
}
985 b2a8658e Umesh Deshpande
986 e1e84ba0 Markus Armbruster
#ifdef __linux__
987 c902760f Marcelo Tosatti
988 c902760f Marcelo Tosatti
#include <sys/vfs.h>
989 c902760f Marcelo Tosatti
990 c902760f Marcelo Tosatti
#define HUGETLBFS_MAGIC       0x958458f6
991 c902760f Marcelo Tosatti
992 c902760f Marcelo Tosatti
static long gethugepagesize(const char *path)
993 c902760f Marcelo Tosatti
{
994 c902760f Marcelo Tosatti
    struct statfs fs;
995 c902760f Marcelo Tosatti
    int ret;
996 c902760f Marcelo Tosatti
997 c902760f Marcelo Tosatti
    do {
998 9742bf26 Yoshiaki Tamura
        ret = statfs(path, &fs);
999 c902760f Marcelo Tosatti
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC) {
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
    }

    return fs.f_bsize;
}

static sigjmp_buf sigjump;

static void sigbus_handler(int signal)
{
    siglongjmp(sigjump, 1);
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(block->mr->name);
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/') {
            *c = '_';
        }
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        g_free(filename);
        return NULL;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);

    /*
     * ftruncate is not supported by hugetlbfs on older kernels,
     * so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory)) {
        perror("ftruncate");
    }

    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }

    if (mem_prealloc) {
        int ret, i;
        struct sigaction act, oldact;
        sigset_t set, oldset;

        memset(&act, 0, sizeof(act));
        act.sa_handler = &sigbus_handler;
        act.sa_flags = 0;

        ret = sigaction(SIGBUS, &act, &oldact);
        if (ret) {
            perror("file_ram_alloc: failed to install signal handler");
            exit(1);
        }

        /* unblock SIGBUS */
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        pthread_sigmask(SIG_UNBLOCK, &set, &oldset);

        if (sigsetjmp(sigjump, 1)) {
            fprintf(stderr, "file_ram_alloc: failed to preallocate pages\n");
            exit(1);
        }

        /* MAP_POPULATE silently ignores failures, so touch each huge page
         * by hand instead; a SIGBUS here means the host pool ran out of
         * huge pages, and the handler longjmps to the error exit above. */
        for (i = 0; i < (memory / hpagesize); i++) {
            memset(area + (hpagesize * i), 0, 1);
        }

        ret = sigaction(SIGBUS, &oldact, NULL);
        if (ret) {
            perror("file_ram_alloc: failed to reinstall signal handler");
            exit(1);
        }

        pthread_sigmask(SIG_SETMASK, &oldset, NULL);
    }

    block->fd = fd;
    return area;
}
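/*
 * Illustrative sketch (not part of the build): the core of the hugetlbfs
 * backing above is just mkstemp + ftruncate + mmap on a hugetlbfs mount.
 * A minimal standalone equivalent, assuming /dev/hugepages is such a
 * mount, 2 MiB huge pages, and a sufficiently filled pool, would be:
 *
 *     char tmpl[] = "/dev/hugepages/demo.XXXXXX";
 *     int fd = mkstemp(tmpl);
 *     unlink(tmpl);                      // keep the mapping, drop the name
 *     ftruncate(fd, 2 * 1024 * 1024);
 *     void *p = mmap(NULL, 2 * 1024 * 1024, PROT_READ | PROT_WRITE,
 *                    MAP_PRIVATE, fd, 0);
 */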

#else
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    fprintf(stderr, "-mem-path not supported on this host\n");
    exit(1);
}
#endif

static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out the same offset multiple times */

    if (QTAILQ_EMPTY(&ram_list.blocks)) {
        return 0;
    }

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}
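/*
 * Worked example of the best-fit search above (hypothetical layout):
 * with blocks at [0, 0x4000) and [0x8000, 0xa000), a request for size
 * 0x2000 sees two gaps, [0x4000, 0x8000) of 0x4000 bytes and
 * [0xa000, RAM_ADDR_MAX).  The first is the smallest gap that still
 * fits, so the function returns 0x4000.
 */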

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        last = MAX(last, block->offset + block->length);
    }

    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;

    /* Use MADV_DONTDUMP if the user doesn't want the guest memory
     * in the core dump. */
    if (!qemu_opt_get_bool(qemu_get_machine_opts(),
                           "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                            "but dump_guest_core=off specified\n");
        }
    }
}

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    qemu_mutex_unlock_ramlist();
}
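/*
 * Note on the resulting format: when a device is given, the idstr is
 * "<qdev-path>/<name>" (e.g. a hypothetical "0000:00:02.0/vga.vram"),
 * otherwise just "<name>".  RAM migration matches blocks between source
 * and destination by this string, which is why duplicates abort above.
 */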

static int memory_try_enable_merging(void *addr, size_t len)
{
    if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *block, *new_block;
    ram_addr_t old_ram_size, new_ram_size;

    old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->fd = -1;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else if (xen_enabled()) {
        if (mem_path) {
            fprintf(stderr, "-mem-path not supported with Xen\n");
            exit(1);
        }
        xen_ram_alloc(new_block->offset, size, mr);
    } else {
        if (mem_path) {
            if (phys_mem_alloc != qemu_anon_ram_alloc) {
                /*
                 * file_ram_alloc() needs to allocate just like
                 * phys_mem_alloc, but we haven't bothered to provide
                 * a hook there.
                 */
                fprintf(stderr,
                        "-mem-path not supported with this accelerator\n");
                exit(1);
            }
            new_block->host = file_ram_alloc(new_block, size, mem_path);
        }
        if (!new_block->host) {
            new_block->host = phys_mem_alloc(size);
            if (!new_block->host) {
                fprintf(stderr, "Cannot set up guest memory '%s': %s\n",
                        new_block->mr->name, strerror(errno));
                exit(1);
            }
            memory_try_enable_merging(new_block->host, size);
        }
    }
    new_block->length = size;

    /* Keep the list sorted from biggest to smallest block.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->length < new_block->length) {
            break;
        }
    }
    if (block) {
        QTAILQ_INSERT_BEFORE(block, new_block, next);
    } else {
        QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    if (new_ram_size > old_ram_size) {
        int i;
        for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
            ram_list.dirty_memory[i] =
                bitmap_zero_extend(ram_list.dirty_memory[i],
                                   old_ram_size, new_ram_size);
        }
    }
    cpu_physical_memory_set_dirty_range(new_block->offset, size);

    qemu_ram_setup_dump(new_block->host, size);
    qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);
    qemu_madvise(new_block->host, size, QEMU_MADV_DONTFORK);

    if (kvm_enabled()) {
        kvm_setup_guest_memory(new_block->host, size);
    }

    return new_block->offset;
}
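/*
 * Usage sketch (a plausible caller, not a prescription): board and device
 * code normally does not call these allocators directly but goes through
 * the memory API, roughly:
 *
 *     memory_region_init_ram(mr, owner, "pc.ram", ram_size);
 *     // ...which ends up in qemu_ram_alloc(ram_size, mr) below.
 */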

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}

void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (xen_enabled()) {
                xen_invalidate_map_cache_entry(block->host);
#ifndef _WIN32
            } else if (block->fd >= 0) {
                munmap(block->host, block->length);
                close(block->fd);
#endif
            } else {
                qemu_anon_ram_free(block->host, block->length);
            }
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (xen_enabled()) {
                abort();
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (block->fd >= 0) {
#ifdef MAP_POPULATE
                    flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                        MAP_PRIVATE;
#else
                    flags |= MAP_PRIVATE;
#endif
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, block->fd, offset);
                } else {
                    /*
                     * Remap needs to match alloc.  Accelerators that
                     * set phys_mem_alloc never remap.  If they did,
                     * we'd need a remap hook here.
                     */
                    assert(phys_mem_alloc == qemu_anon_ram_alloc);

                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */

/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block = qemu_get_ram_block(addr);

    if (xen_enabled()) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        } else if (block->host == NULL) {
            block->host =
                xen_map_cache(block->offset, block->length, 1);
        }
    }
    return block->host + (addr - block->offset);
}
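/*
 * Design note on the Xen branch above: under Xen the guest's RAM lives in
 * the hypervisor, so block->host may be NULL until the block is mapped on
 * demand through xen_map_cache().  The main RAM block (block->offset == 0)
 * is special-cased to map only up to the end of the touched page rather
 * than the whole block.
 */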

/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QTAILQ_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length) {
                    *size = block->length - addr + block->offset;
                }
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return qemu_get_ram_block(*ram_addr)->mr;
    }

    block = ram_list.mru_block;
    if (block && block->host && host - block->host < block->length) {
        goto found;
    }

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        /* This case appears when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            goto found;
        }
    }

    return NULL;

found:
    *ram_addr = block->offset + (host - block->host);
    return block->mr;
}
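/*
 * Performance note: the mru_block check above is a one-entry cache.
 * TLB refills tend to hit the same RAMBlock repeatedly, so most lookups
 * avoid walking the whole (size-sorted) block list.
 */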

static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_page_fast(ram_addr, size);
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_MIGRATION);
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_VGA);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (!cpu_physical_memory_is_clean(ram_addr)) {
        CPUArchState *env = current_cpu->env_ptr;
        tlb_set_dirty(env, env->mem_io_vaddr);
    }
}
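/*
 * Flow of a write that lands here: the page trapped because some dirty
 * bit was still clean.  The handler invalidates any TBs translated from
 * the page, performs the store, sets the migration and VGA dirty bits,
 * and finally re-marks the TLB entry as dirty so that subsequent writes
 * go straight to RAM instead of trapping again.
 */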

static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
                                 unsigned size, bool is_write)
{
    return is_write;
}

static const MemoryRegionOps notdirty_mem_ops = {
    .write = notdirty_mem_write,
    .valid.accepts = notdirty_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUArchState *env = current_cpu->env_ptr;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb_check_watchpoint(env);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(env);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(env, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
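/*
 * The two exits above differ by wp->flags: BP_STOP_BEFORE_ACCESS raises
 * EXCP_DEBUG immediately, before the access completes, while the default
 * case regenerates a single-instruction TB (cflags of 1 in tb_gen_code)
 * and restarts it, so execution stops just after the watched access.
 */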

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint64_t watch_mem_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
    switch (size) {
    case 1: return ldub_phys(&address_space_memory, addr);
    case 2: return lduw_phys(&address_space_memory, addr);
    case 4: return ldl_phys(&address_space_memory, addr);
    default: abort();
    }
}

static void watch_mem_write(void *opaque, hwaddr addr,
                            uint64_t val, unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
    switch (size) {
    case 1:
        stb_phys(&address_space_memory, addr, val);
        break;
    case 2:
        stw_phys(&address_space_memory, addr, val);
        break;
    case 4:
        stl_phys(&address_space_memory, addr, val);
        break;
    default: abort();
    }
}

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t subpage_read(void *opaque, hwaddr addr,
                             unsigned len)
{
    subpage_t *subpage = opaque;
    uint8_t buf[4];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
           subpage, len, addr);
#endif
    address_space_read(subpage->as, addr + subpage->base, buf, len);
    switch (len) {
    case 1:
        return ldub_p(buf);
    case 2:
        return lduw_p(buf);
    case 4:
        return ldl_p(buf);
    default:
        abort();
    }
}

static void subpage_write(void *opaque, hwaddr addr,
                          uint64_t value, unsigned len)
{
    subpage_t *subpage = opaque;
    uint8_t buf[4];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx
           " value %"PRIx64"\n",
           __func__, subpage, len, addr, value);
#endif
    switch (len) {
    case 1:
        stb_p(buf, value);
        break;
    case 2:
        stw_p(buf, value);
        break;
    case 4:
        stl_p(buf, value);
        break;
    default:
        abort();
    }
    address_space_write(subpage->as, addr + subpage->base, buf, len);
}

static bool subpage_accepts(void *opaque, hwaddr addr,
                            unsigned len, bool is_write)
{
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
           __func__, subpage, is_write ? 'w' : 'r', len, addr);
#endif

    return address_space_access_valid(subpage->as, addr + subpage->base,
                                      len, is_write);
}

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE) {
        return -1;
    }
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}

static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->as = as;
    mmio->base = base;
    memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
                          "subpage", TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);

    return mmio;
}
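/*
 * Why subpages exist: the dispatch tables work at TARGET_PAGE_SIZE
 * granularity.  When several MemoryRegionSections share one guest page
 * (e.g. two small MMIO regions back to back), the page is covered by a
 * subpage whose sub_section[] maps each offset within the page to the
 * owning section; accesses are then bounced through the address space
 * again at the precise offset, as subpage_read/write do above.
 */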

static uint16_t dummy_section(PhysPageMap *map, MemoryRegion *mr)
{
    MemoryRegionSection section = {
        .address_space = &address_space_memory,
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = int128_2_64(),
    };

    return phys_section_add(map, &section);
}

MemoryRegion *iotlb_to_region(AddressSpace *as, hwaddr index)
{
    return as->dispatch->map.sections[index & ~TARGET_PAGE_MASK].mr;
}

static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL,
                          "rom", UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
                          "unassigned", UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
                          "notdirty", UINT64_MAX);
    memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
                          "watch", UINT64_MAX);
}

static void mem_begin(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
    uint16_t n;

    n = dummy_section(&d->map, &io_mem_unassigned);
    assert(n == PHYS_SECTION_UNASSIGNED);
    n = dummy_section(&d->map, &io_mem_notdirty);
    assert(n == PHYS_SECTION_NOTDIRTY);
    n = dummy_section(&d->map, &io_mem_rom);
    assert(n == PHYS_SECTION_ROM);
    n = dummy_section(&d->map, &io_mem_watch);
    assert(n == PHYS_SECTION_WATCH);

    d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
    d->as = as;
    as->next_dispatch = d;
}

static void mem_commit(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *cur = as->dispatch;
    AddressSpaceDispatch *next = as->next_dispatch;

    phys_page_compact_all(next, next->map.nodes_nb);

    as->dispatch = next;

    if (cur) {
        phys_sections_free(&cur->map);
        g_free(cur);
    }
}
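/*
 * Design note: mem_begin()/mem_commit() double-buffer the dispatch
 * structure.  A topology update builds a fresh AddressSpaceDispatch in
 * as->next_dispatch while lookups keep using as->dispatch; the commit
 * compacts the new page table, swaps the pointer, and only then frees
 * the old one.
 */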

static void tcg_commit(MemoryListener *listener)
{
    CPUState *cpu;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    CPU_FOREACH(cpu) {
        CPUArchState *env = cpu->env_ptr;

        /* FIXME: Disentangle the cpu.h circular files deps so we can
           directly get the right CPU from listener.  */
        if (cpu->tcg_as_listener != listener) {
            continue;
        }
        tlb_flush(env, 1);
    }
}

static void core_log_global_start(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(true);
}

static void core_log_global_stop(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(false);
}

static MemoryListener core_memory_listener = {
    .log_global_start = core_log_global_start,
    .log_global_stop = core_log_global_stop,
    .priority = 1,
};

void address_space_init_dispatch(AddressSpace *as)
{
    as->dispatch = NULL;
    as->dispatch_listener = (MemoryListener) {
        .begin = mem_begin,
        .commit = mem_commit,
        .region_add = mem_add,
        .region_nop = mem_add,
        .priority = 0,
    };
    memory_listener_register(&as->dispatch_listener, as);
}

void address_space_destroy_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = as->dispatch;

    memory_listener_unregister(&as->dispatch_listener);
    g_free(d);
    as->dispatch = NULL;
}

static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));

    memory_region_init(system_memory, NULL, "system", UINT64_MAX);
    address_space_init(&address_space_memory, system_memory, "memory");

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
                          65536);
    address_space_init(&address_space_io, system_io, "I/O");

    memory_listener_register(&core_memory_listener, &address_space_memory);
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID)) {
            return -1;
        }
        if (is_write) {
            if (!(flags & PAGE_WRITE)) {
                return -1;
            }
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0))) {
                return -1;
            }
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ)) {
                return -1;
            }
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1))) {
                return -1;
            }
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else

static void invalidate_and_set_dirty(hwaddr addr,
                                     hwaddr length)
{
    if (cpu_physical_memory_is_clean(addr)) {
        /* invalidate code */
        tb_invalidate_phys_page_range(addr, addr + length, 0);
        /* set dirty bit */
        cpu_physical_memory_set_dirty_flag(addr, DIRTY_MEMORY_VGA);
        cpu_physical_memory_set_dirty_flag(addr, DIRTY_MEMORY_MIGRATION);
    }
    xen_modified_memory(addr, length);
}

static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
{
    unsigned access_size_max = mr->ops->valid.max_access_size;

    /* Regions are assumed to support 1-4 byte accesses unless
       otherwise specified.  */
    if (access_size_max == 0) {
        access_size_max = 4;
    }

    /* Bound the maximum access by the alignment of the address.  */
    if (!mr->ops->impl.unaligned) {
        unsigned align_size_max = addr & -addr;
        if (align_size_max != 0 && align_size_max < access_size_max) {
            access_size_max = align_size_max;
        }
    }

    /* Don't attempt accesses larger than the maximum.  */
    if (l > access_size_max) {
        l = access_size_max;
    }
    /* Round a non-power-of-two size down to the largest power of two
       that fits.  */
    if (l & (l - 1)) {
        l = 1 << (qemu_fls(l) - 1);
    }

    return l;
}
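/*
 * Worked example for the function above: a 6-byte access at address
 * 0x1002 against a region with max_access_size 4 and no unaligned
 * support.  addr & -addr isolates the lowest set bit (0x1002 & -0x1002
 * == 2), so alignment caps the access at 2 bytes; 2 is already a power
 * of two, so memory_access_size() returns 2 and the caller loops for
 * the remaining bytes.
 */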
1987 82f2563f Paolo Bonzini
1988 fd8aaa76 Paolo Bonzini
bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
1989 ac1970fb Avi Kivity
                      int len, bool is_write)
1990 13eb76e0 bellard
{
1991 149f54b5 Paolo Bonzini
    hwaddr l;
1992 13eb76e0 bellard
    uint8_t *ptr;
1993 791af8c8 Paolo Bonzini
    uint64_t val;
1994 149f54b5 Paolo Bonzini
    hwaddr addr1;
1995 5c8a00ce Paolo Bonzini
    MemoryRegion *mr;
1996 fd8aaa76 Paolo Bonzini
    bool error = false;
1997 3b46e624 ths
1998 13eb76e0 bellard
    while (len > 0) {
1999 149f54b5 Paolo Bonzini
        l = len;
2000 5c8a00ce Paolo Bonzini
        mr = address_space_translate(as, addr, &addr1, &l, is_write);
2001 3b46e624 ths
2002 13eb76e0 bellard
        if (is_write) {
2003 5c8a00ce Paolo Bonzini
            if (!memory_access_is_direct(mr, is_write)) {
2004 5c8a00ce Paolo Bonzini
                l = memory_access_size(mr, l, addr1);
2005 4917cf44 Andreas Färber
                /* XXX: could force current_cpu to NULL to avoid
2006 6a00d601 bellard
                   potential bugs */
2007 23326164 Richard Henderson
                switch (l) {
2008 23326164 Richard Henderson
                case 8:
2009 23326164 Richard Henderson
                    /* 64 bit write access */
2010 23326164 Richard Henderson
                    val = ldq_p(buf);
2011 23326164 Richard Henderson
                    error |= io_mem_write(mr, addr1, val, 8);
                    break;
                case 4:
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    error |= io_mem_write(mr, addr1, val, 4);
                    break;
                case 2:
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    error |= io_mem_write(mr, addr1, val, 2);
                    break;
                case 1:
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    error |= io_mem_write(mr, addr1, val, 1);
                    break;
                default:
                    abort();
                }
            } else {
                addr1 += memory_region_get_ram_addr(mr);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(addr1, l);
            }
        } else {
            if (!memory_access_is_direct(mr, is_write)) {
                /* I/O case */
                l = memory_access_size(mr, l, addr1);
                switch (l) {
                case 8:
                    /* 64 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 8);
                    stq_p(buf, val);
                    break;
                case 4:
                    /* 32 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 4);
                    stl_p(buf, val);
                    break;
                case 2:
                    /* 16 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 2);
                    stw_p(buf, val);
                    break;
                case 1:
                    /* 8 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 1);
                    stb_p(buf, val);
                    break;
                default:
                    abort();
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }

    return error;
}

bool address_space_write(AddressSpace *as, hwaddr addr,
                         const uint8_t *buf, int len)
{
    return address_space_rw(as, addr, (uint8_t *)buf, len, true);
}

bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
{
    return address_space_rw(as, addr, buf, len, false);
}

void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, buf, len, is_write);
}
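
/* Illustrative usage sketch, for exposition only: the example_* name and the
 * 0x1000 address are hypothetical.  Both calls below funnel into
 * address_space_rw() above. */
static void example_copy_guest_bytes(void)
{
    uint8_t out[4] = { 0x12, 0x34, 0x56, 0x78 };
    uint8_t in[4];

    /* Write four bytes of guest-physical memory, then read them back. */
    cpu_physical_memory_rw(0x1000, out, sizeof(out), 1);
    address_space_read(&address_space_memory, 0x1000, in, sizeof(in));
}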

enum write_rom_type {
    WRITE_DATA,
    FLUSH_CACHE,
};

static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
    hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
{
    hwaddr l;
    uint8_t *ptr;
    hwaddr addr1;
    MemoryRegion *mr;

    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            /* do nothing */
        } else {
            addr1 += memory_region_get_ram_addr(mr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            switch (type) {
            case WRITE_DATA:
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(addr1, l);
                break;
            case FLUSH_CACHE:
                flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
                break;
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

/* used for ROM loading: can write in RAM and ROM */
void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
                                   const uint8_t *buf, int len)
{
    cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
}
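
/* Illustrative usage sketch, for exposition only: a hypothetical ROM loader.
 * cpu_physical_memory_write_rom() must be used here because an ordinary
 * write would leave ROM regions untouched. */
static void example_load_rom_image(const uint8_t *image, int size, hwaddr base)
{
    cpu_physical_memory_write_rom(&address_space_memory, base, image, size);
}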

void cpu_flush_icache_range(hwaddr start, int len)
{
    /*
     * This function should do the same thing as an icache flush that was
     * triggered from within the guest. For TCG we are always cache coherent,
     * so there is no need to flush anything. For KVM / Xen we need to flush
     * the host's instruction cache at least.
     */
    if (tcg_enabled()) {
        return;
    }

    cpu_physical_memory_write_rom_internal(&address_space_memory,
                                           start, NULL, len, FLUSH_CACHE);
}
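
/* Illustrative usage sketch, for exposition only: patching one (hypothetical
 * 4-byte) guest instruction in place.  After the store, the host icache must
 * be flushed for KVM/Xen; under TCG the flush is a no-op, as explained
 * above. */
static void example_patch_guest_insn(hwaddr insn_addr, uint32_t new_insn)
{
    stl_phys(&address_space_memory, insn_addr, new_insn);
    cpu_flush_icache_range(insn_addr, 4);
}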

typedef struct {
    MemoryRegion *mr;
    void *buffer;
    hwaddr addr;
    hwaddr len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

static void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
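
/* Illustrative usage sketch, for exposition only: the retry pattern the
 * map-client list exists for.  When address_space_map() returns NULL because
 * the single bounce buffer is busy, a DMA implementation registers a
 * callback and retries once cpu_notify_map_clients() fires.  All example_*
 * names are hypothetical. */
static void example_map_retry_cb(void *opaque)
{
    /* A real implementation would restart the stalled DMA transfer here. */
}

static void *example_try_map(AddressSpace *as, hwaddr addr, hwaddr *plen,
                             void *dma_state)
{
    void *host = address_space_map(as, addr, plen, true);

    if (host == NULL) {
        /* Mapping resources exhausted: ask to be woken for a retry. */
        cpu_register_map_client(dma_state, example_map_retry_cb);
    }
    return host;
}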

bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
{
    MemoryRegion *mr;
    hwaddr l, xlat;

    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write)) {
                return false;
            }
        }

        len -= l;
        addr += l;
    }
    return true;
}
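
/* Illustrative usage sketch, for exposition only: a device model validating
 * a guest-supplied buffer before starting a transfer rather than failing
 * halfway through.  example_buffer_ok() is hypothetical. */
static bool example_buffer_ok(AddressSpace *as, hwaddr buf, int size)
{
    /* Check both directions for a buffer that will be read and written. */
    return address_space_access_valid(as, buf, size, false) &&
           address_space_access_valid(as, buf, size, true);
}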

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr done = 0;
    hwaddr l, xlat, base;
    MemoryRegion *mr, *this_mr;
    ram_addr_t raddr;

    if (len == 0) {
        return NULL;
    }

    l = len;
    mr = address_space_translate(as, addr, &xlat, &l, is_write);
    if (!memory_access_is_direct(mr, is_write)) {
        if (bounce.buffer) {
            return NULL;
        }
        /* Avoid unbounded allocations */
        l = MIN(l, TARGET_PAGE_SIZE);
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
        bounce.addr = addr;
        bounce.len = l;

        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            address_space_read(as, addr, bounce.buffer, l);
        }

        *plen = l;
        return bounce.buffer;
    }

    base = xlat;
    raddr = memory_region_get_ram_addr(mr);

    for (;;) {
        len -= l;
        addr += l;
        done += l;
        if (len == 0) {
            break;
        }

        l = len;
        this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (this_mr != mr || xlat != base + done) {
            break;
        }
    }

    memory_region_ref(mr);
    *plen = done;
    return qemu_ram_ptr_length(raddr + base, plen);
}

/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = qemu_ram_addr_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                invalidate_and_set_dirty(addr1, l);
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    cpu_notify_map_clients();
}

void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}
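
/* Illustrative usage sketch, for exposition only: the canonical
 * map/access/unmap sequence.  *plen can come back smaller than requested and
 * the mapping can fail outright, so a robust caller must cope; this
 * hypothetical example simply gives up rather than looping. */
static bool example_zero_guest_range(hwaddr addr, hwaddr len)
{
    hwaddr plen = len;
    void *host = cpu_physical_memory_map(addr, &plen, 1);

    if (host == NULL) {
        return false;
    }
    if (plen < len) {
        /* Partial mapping: release it without claiming anything was written. */
        cpu_physical_memory_unmap(host, plen, 1, 0);
        return false;
    }
    memset(host, 0, len);
    cpu_physical_memory_unmap(host, plen, 1, len);
    return true;
}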

/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(AddressSpace *as, hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
{
    return ldl_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
{
    return ldl_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
{
    return ldl_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
}
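
/* Illustrative usage sketch, for exposition only: reading a register of a
 * hypothetical little-endian device.  ldl_le_phys() byte-swaps exactly when
 * the target's native order differs, so the caller sees the same value on
 * every target. */
static uint32_t example_read_le_reg(AddressSpace *as, hwaddr reg)
{
    return ldl_le_phys(as, reg);
}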

/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(AddressSpace *as, hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;

    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 8);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
{
    return ldq_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
{
    return ldq_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
{
    return ldq_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
{
    uint8_t val;
    address_space_rw(as, addr, &val, 1, 0);
    return val;
}

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(AddressSpace *as, hwaddr addr,
                                          enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;

    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
{
    return lduw_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
{
    return lduw_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
{
    return lduw_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        io_mem_write(mr, addr1, val, 4);
    } else {
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (cpu_physical_memory_is_clean(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flag(addr1,
                                                   DIRTY_MEMORY_MIGRATION);
                cpu_physical_memory_set_dirty_flag(addr1, DIRTY_MEMORY_VGA);
            }
        }
    }
}
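
/* Illustrative usage sketch, for exposition only: the kind of caller the
 * _notdirty variant serves.  A hypothetical page-table walker setting an
 * accessed bit in a guest PTE must not mark the page dirty for migration,
 * or every walk would re-dirty the page tables. */
static void example_set_pte_accessed(AddressSpace *as, hwaddr pte_addr,
                                     uint32_t pte, uint32_t accessed_bit)
{
    stl_phys_notdirty(as, pte_addr, pte | accessed_bit);
}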

/* warning: addr must be aligned */
static inline void stl_phys_internal(AddressSpace *as,
                                     hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(mr, addr1, val, 4);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 4);
    }
}

void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    stl_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    stl_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    stl_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    uint8_t v = val;
    address_space_rw(as, addr, &v, 1, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(AddressSpace *as,
                                     hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;

    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(mr, addr1, val, 2);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 2);
    }
}

void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    stw_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    stw_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    stw_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    val = tswap64(val);
    address_space_rw(as, addr, (void *) &val, 8, 1);
}

void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    val = cpu_to_le64(val);
    address_space_rw(as, addr, (void *) &val, 8, 1);
}

void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    val = cpu_to_be64(val);
    address_space_rw(as, addr, (void *) &val, 8, 1);
}
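
/* Illustrative usage sketch, for exposition only: pairing a sized,
 * explicit-endian load with the matching store to bump a hypothetical
 * big-endian 64-bit counter in guest memory. */
static void example_bump_be64_counter(AddressSpace *as, hwaddr counter)
{
    uint64_t v = ldq_be_phys(as, counter);

    stq_be_phys(as, counter, v + 1);
}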

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(cpu, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
        } else {
            address_space_rw(cpu->as, phys_addr, buf, l, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
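
/* Illustrative usage sketch, for exposition only: a gdb-stub-style peek at
 * guest *virtual* memory.  Returns 0 on success or -1 if any page in the
 * range is unmapped; example_peek_guest_vaddr() is a hypothetical wrapper. */
static int example_peek_guest_vaddr(CPUState *cpu, target_ulong vaddr,
                                    uint8_t *out, int size)
{
    return cpu_memory_rw_debug(cpu, vaddr, out, size, 0);
}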
#endif

#if !defined(CONFIG_USER_ONLY)

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#endif

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;

    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    return !(memory_region_is_ram(mr) ||
             memory_region_is_romd(mr));
}

void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        func(block->host, block->offset, block->length, opaque);
    }
}
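
/* Illustrative usage sketch, for exposition only: a callback that totals
 * guest RAM, assuming RAMBlockIterFunc matches the func(host, offset,
 * length, opaque) call above.  Usage:
 *     uint64_t total = 0;
 *     qemu_ram_foreach_block(example_sum_ram, &total); */
static void example_sum_ram(void *host_addr, ram_addr_t offset,
                            ram_addr_t length, void *opaque)
{
    uint64_t *total = opaque;

    *total += length;
}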
#endif