exec.c @ 981fdf23
/*
 *  Virtual page mapping
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"
#include "qemu/cache-utils.h"

#include "qemu/range.h"

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
60 981fdf23 Juan Quintela
static bool in_migration;
61 94a6b54f pbrook
62 a3161038 Paolo Bonzini
RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };
63 62152b8a Avi Kivity
64 62152b8a Avi Kivity
static MemoryRegion *system_memory;
65 309cb471 Avi Kivity
static MemoryRegion *system_io;
66 62152b8a Avi Kivity
67 f6790af6 Avi Kivity
AddressSpace address_space_io;
68 f6790af6 Avi Kivity
AddressSpace address_space_memory;
69 2673a5da Avi Kivity
70 0844e007 Paolo Bonzini
MemoryRegion io_mem_rom, io_mem_notdirty;
71 acc9d80b Jan Kiszka
static MemoryRegion io_mem_unassigned;
72 0e0df1e2 Avi Kivity
73 e2eef170 pbrook
#endif
74 9fa3e853 bellard
75 bdc44640 Andreas Färber
struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
76 6a00d601 bellard
/* current CPU in the current thread. It is only valid inside
77 6a00d601 bellard
   cpu_exec() */
78 4917cf44 Andreas Färber
DEFINE_TLS(CPUState *, current_cpu);
79 2e70f6ef pbrook
/* 0 = Do not count executed instructions.
80 bf20dc07 ths
   1 = Precise instruction counting.
81 2e70f6ef pbrook
   2 = Adaptive rate instruction counting.  */
82 5708fc66 Paolo Bonzini
int use_icount;
83 6a00d601 bellard
#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many levels to skip to the next node; each level covers
       P_L2_BITS bits of the address. 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

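/* A PhysPageEntry packs into 32 bits: 6 bits of "skip" plus a 26-bit
 * "ptr", so PHYS_MAP_NODE_NIL above is the all-ones 26-bit value
 * 0x3ffffff, and at most 2^26 - 1 real radix-tree nodes can be addressed.
 */
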
/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];

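/* Worked example: with 4 KiB target pages (TARGET_PAGE_BITS == 12, the
 * common case), P_L2_LEVELS = ((64 - 12 - 1) / 9) + 1 = 6, i.e. six
 * 512-entry levels covering 6 * 9 + 12 = 66 >= 64 address bits.
 */
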
typedef struct PhysPageMap {
    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map)
{
    unsigned i;
    uint32_t ret;

    ret = map->nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);
    for (i = 0; i < P_L2_SIZE; ++i) {
        map->nodes[ret][i].skip = 1;
        map->nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map);
        p = map->nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < P_L2_SIZE; i++) {
                p[i].skip = 0;
                p[i].ptr = PHYS_SECTION_UNASSIGNED;
            }
        }
    } else {
        p = map->nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

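/* phys_page_set() takes its index and nb arguments in units of target
 * pages.  register_multipage() below, for instance, maps num_pages pages
 * starting at start_addr >> TARGET_PAGE_BITS onto one section index.
 * Runs that are aligned and at least one step wide are recorded directly
 * at the current level; only misaligned or partial remainders recurse
 * deeper.
 */
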
/* Compact a non-leaf page entry.  Simply detect that the entry has a single
 * child, and update our entry so we can skip it and go directly to the
 * destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

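/* Example of the effect: if node A's only child is node B, and B in turn
 * has a single child, A inherits B's ptr and the skip counts add up
 * (A.skip += B.skip), so a later lookup hops from A straight past B.
 * The (1 << 3) bound above keeps the combined skip well inside the
 * 6-bit field.
 */
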
static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}

static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}

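/* The lookup mirrors phys_page_set_level(): i counts levels down from
 * P_L2_LEVELS and each hop subtracts the entry's skip, so an uncompacted
 * tree (skip == 1 everywhere) walks one node per level, while a compacted
 * chain is crossed in a single step.  The final range check guards
 * against a compacted leaf that covers more address space than the
 * section it points to.
 */
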
bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    return section;
}

MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    hwaddr len = *plen;

    for (;;) {
        section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    *plen = len;
    *xlat = addr;
    return mr;
}

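/* The loop above follows an arbitrarily deep chain of IOMMUs: each pass
 * resolves addr within the current address space; if the region found is
 * an IOMMU, its translate callback rewrites the address, len is clamped
 * to the IOMMU page containing it, the permission bit is checked (bit 0
 * for reads, bit 1 for writes, hence 1 << is_write), and resolution
 * restarts in iotlb.target_as until a terminal memory region is reached.
 */
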
MemoryRegionSection *
address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
                                  hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}

#if !defined(CONFIG_USER_ONLY)
397 5b6dd868 Blue Swirl
398 5b6dd868 Blue Swirl
static int cpu_common_post_load(void *opaque, int version_id)
399 fd6ce8f6 bellard
{
400 259186a7 Andreas Färber
    CPUState *cpu = opaque;
401 a513fe19 bellard
402 5b6dd868 Blue Swirl
    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
403 5b6dd868 Blue Swirl
       version_id is increased. */
404 259186a7 Andreas Färber
    cpu->interrupt_request &= ~0x01;
405 259186a7 Andreas Färber
    tlb_flush(cpu->env_ptr, 1);
406 5b6dd868 Blue Swirl
407 5b6dd868 Blue Swirl
    return 0;
408 a513fe19 bellard
}
409 7501267e bellard
410 1a1562f5 Andreas Färber
const VMStateDescription vmstate_cpu_common = {
411 5b6dd868 Blue Swirl
    .name = "cpu_common",
412 5b6dd868 Blue Swirl
    .version_id = 1,
413 5b6dd868 Blue Swirl
    .minimum_version_id = 1,
414 5b6dd868 Blue Swirl
    .minimum_version_id_old = 1,
415 5b6dd868 Blue Swirl
    .post_load = cpu_common_post_load,
416 5b6dd868 Blue Swirl
    .fields      = (VMStateField []) {
417 259186a7 Andreas Färber
        VMSTATE_UINT32(halted, CPUState),
418 259186a7 Andreas Färber
        VMSTATE_UINT32(interrupt_request, CPUState),
419 5b6dd868 Blue Swirl
        VMSTATE_END_OF_LIST()
420 5b6dd868 Blue Swirl
    }
421 5b6dd868 Blue Swirl
};
422 1a1562f5 Andreas Färber
423 5b6dd868 Blue Swirl
#endif
424 ea041c0e bellard
425 38d8f5c8 Andreas Färber
CPUState *qemu_get_cpu(int index)
426 ea041c0e bellard
{
427 bdc44640 Andreas Färber
    CPUState *cpu;
428 ea041c0e bellard
429 bdc44640 Andreas Färber
    CPU_FOREACH(cpu) {
430 55e5c285 Andreas Färber
        if (cpu->cpu_index == index) {
431 bdc44640 Andreas Färber
            return cpu;
432 55e5c285 Andreas Färber
        }
433 ea041c0e bellard
    }
434 5b6dd868 Blue Swirl
435 bdc44640 Andreas Färber
    return NULL;
436 ea041c0e bellard
}
437 ea041c0e bellard
void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUState *some_cpu;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = 0;
    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
    if (phys != -1) {
        tb_invalidate_phys_addr(phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

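/* Example of the sanity check: len == 4 gives len_mask == ~3, so
 * addr == 0x1002 fails (addr & ~len_mask) == 2 and is rejected, while
 * len == 3 fails the power-of-two test, (len & (len - 1)) != 0.
 */
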
/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
    }

    breakpoint_invalidate(ENV_GET_CPU(env), pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(ENV_GET_CPU(env), breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            CPUArchState *env = cpu->env_ptr;
            tb_flush(env);
        }
    }
#endif
}

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    CPUState *cpu = ENV_GET_CPU(env);
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
        goto found;
    }
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    ram_list.mru_block = block;
    return block;
}

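/* ram_list.mru_block acts as a one-entry cache: the common case of
 * repeated accesses to the same block skips the list walk entirely, and
 * every miss promotes the block it finds.  Note the unsigned comparison
 * (addr - block->offset < block->length) also rejects addr < offset.
 */
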
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)block->host + (start - block->offset);
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
                                     unsigned client)
{
    if (length == 0)
        return;
    cpu_physical_memory_clear_dirty_range(start, length, client);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }
}

static void cpu_physical_memory_set_dirty_tracking(bool enable)
{
    in_migration = enable;
}

hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        iotlb = section - address_space_memory.dispatch->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
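
/* The returned iotlb overloads one word: for RAM it is the page-aligned
 * ram_addr_t of the page with a PHYS_SECTION_* tag ORed into the low
 * bits, which works because phys_section_add() keeps section numbers
 * below TARGET_PAGE_SIZE; for MMIO it is the section's index in the
 * dispatch map plus the xlat offset.
 */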
#endif /* !defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size) = qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t))
{
    phys_mem_alloc = alloc;
}

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

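/* With 4 KiB target pages this caps a dispatch map at 4096 distinct
 * sections, matching the low bits available for the PHYS_SECTION_* tags
 * in memory_region_section_get_iotlb() above.  The array grows
 * geometrically (16, 32, 64, ...) via g_renew().
 */
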
static void phys_section_destroy(MemoryRegion *mr)
{
    memory_region_unref(mr);

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}


static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}

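/* Worked example (4 KiB pages): a section at 0x1800 of size 0x3000 is
 * split into a subpage head [0x1800, 0x2000), a multipage run
 * [0x2000, 0x4000) registered as two whole pages, and a subpage tail
 * [0x4000, 0x4800); only the partial pages pay the subpage indirection.
 */
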
void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#ifdef __linux__

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static sigjmp_buf sigjump;

static void sigbus_handler(int signal)
{
    siglongjmp(sigjump, 1);
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(block->mr->name);
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/')
            *c = '_';
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        g_free(filename);
        return NULL;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return (NULL);
    }

    if (mem_prealloc) {
        int ret, i;
        struct sigaction act, oldact;
        sigset_t set, oldset;

        memset(&act, 0, sizeof(act));
        act.sa_handler = &sigbus_handler;
        act.sa_flags = 0;

        ret = sigaction(SIGBUS, &act, &oldact);
        if (ret) {
            perror("file_ram_alloc: failed to install signal handler");
            exit(1);
        }

        /* unblock SIGBUS */
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        pthread_sigmask(SIG_UNBLOCK, &set, &oldset);

        if (sigsetjmp(sigjump, 1)) {
            fprintf(stderr, "file_ram_alloc: failed to preallocate pages\n");
            exit(1);
        }

        /* MAP_POPULATE silently ignores failures */
1072 ef36fa14 Marcelo Tosatti
        for (i = 0; i < (memory/hpagesize); i++) {
1073 ef36fa14 Marcelo Tosatti
            memset(area + (hpagesize*i), 0, 1);
1074 ef36fa14 Marcelo Tosatti
        }
1075 ef36fa14 Marcelo Tosatti
1076 ef36fa14 Marcelo Tosatti
        ret = sigaction(SIGBUS, &oldact, NULL);
1077 ef36fa14 Marcelo Tosatti
        if (ret) {
1078 ef36fa14 Marcelo Tosatti
            perror("file_ram_alloc: failed to reinstall signal handler");
1079 ef36fa14 Marcelo Tosatti
            exit(1);
1080 ef36fa14 Marcelo Tosatti
        }
1081 ef36fa14 Marcelo Tosatti
1082 ef36fa14 Marcelo Tosatti
        pthread_sigmask(SIG_SETMASK, &oldset, NULL);
1083 ef36fa14 Marcelo Tosatti
    }
1084 ef36fa14 Marcelo Tosatti
1085 04b16653 Alex Williamson
    block->fd = fd;
1086 c902760f Marcelo Tosatti
    return area;
1087 c902760f Marcelo Tosatti
}
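/*
 * Illustrative sketch, not part of the original file: the round-up of
 * "memory" above relies on hpagesize being a power of two.  The same
 * arithmetic in isolation:
 *
 *     #include <assert.h>
 *     #include <stdint.h>
 *
 *     static uint64_t round_up_pow2(uint64_t size, uint64_t align)
 *     {
 *         assert(align && !(align & (align - 1)));   // power of two
 *         return (size + align - 1) & ~(align - 1);
 *     }
 *
 *     // round_up_pow2(5 << 20, 2 << 20) == 6 << 20: a 5 MiB request
 *     // occupies three 2 MiB hugepages.
 */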
1088 e1e84ba0 Markus Armbruster
#else
1089 e1e84ba0 Markus Armbruster
static void *file_ram_alloc(RAMBlock *block,
1090 e1e84ba0 Markus Armbruster
                            ram_addr_t memory,
1091 e1e84ba0 Markus Armbruster
                            const char *path)
1092 e1e84ba0 Markus Armbruster
{
1093 e1e84ba0 Markus Armbruster
    fprintf(stderr, "-mem-path not supported on this host\n");
1094 e1e84ba0 Markus Armbruster
    exit(1);
1095 e1e84ba0 Markus Armbruster
}
1096 c902760f Marcelo Tosatti
#endif
1097 c902760f Marcelo Tosatti
1098 d17b5288 Alex Williamson
static ram_addr_t find_ram_offset(ram_addr_t size)
1099 d17b5288 Alex Williamson
{
1100 04b16653 Alex Williamson
    RAMBlock *block, *next_block;
1101 3e837b2c Alex Williamson
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
1102 04b16653 Alex Williamson
1103 49cd9ac6 Stefan Hajnoczi
    assert(size != 0); /* it would hand out the same offset multiple times */
1104 49cd9ac6 Stefan Hajnoczi
1105 a3161038 Paolo Bonzini
    if (QTAILQ_EMPTY(&ram_list.blocks))
1106 04b16653 Alex Williamson
        return 0;
1107 04b16653 Alex Williamson
1108 a3161038 Paolo Bonzini
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1109 f15fbc4b Anthony PERARD
        ram_addr_t end, next = RAM_ADDR_MAX;
1110 04b16653 Alex Williamson
1111 04b16653 Alex Williamson
        end = block->offset + block->length;
1112 04b16653 Alex Williamson
1113 a3161038 Paolo Bonzini
        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
1114 04b16653 Alex Williamson
            if (next_block->offset >= end) {
1115 04b16653 Alex Williamson
                next = MIN(next, next_block->offset);
1116 04b16653 Alex Williamson
            }
1117 04b16653 Alex Williamson
        }
1118 04b16653 Alex Williamson
        if (next - end >= size && next - end < mingap) {
1119 3e837b2c Alex Williamson
            offset = end;
1120 04b16653 Alex Williamson
            mingap = next - end;
1121 04b16653 Alex Williamson
        }
1122 04b16653 Alex Williamson
    }
1123 3e837b2c Alex Williamson
1124 3e837b2c Alex Williamson
    if (offset == RAM_ADDR_MAX) {
1125 3e837b2c Alex Williamson
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1126 3e837b2c Alex Williamson
                (uint64_t)size);
1127 3e837b2c Alex Williamson
        abort();
1128 3e837b2c Alex Williamson
    }
1129 3e837b2c Alex Williamson
1130 04b16653 Alex Williamson
    return offset;
1131 04b16653 Alex Williamson
}
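/*
 * Worked example, illustration only: with blocks at [0x0000, 0x1000)
 * and [0x3000, 0x5000), find_ram_offset(0x1000) sees two candidate
 * gaps: [0x1000, 0x3000) of size 0x2000, and the effectively unbounded
 * space above 0x5000.  The first is the smallest gap that fits, so the
 * function returns 0x1000; best-fit keeps the ram_addr_t space compact.
 */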
1132 04b16653 Alex Williamson
1133 652d7ec2 Juan Quintela
ram_addr_t last_ram_offset(void)
1134 04b16653 Alex Williamson
{
1135 d17b5288 Alex Williamson
    RAMBlock *block;
1136 d17b5288 Alex Williamson
    ram_addr_t last = 0;
1137 d17b5288 Alex Williamson
1138 a3161038 Paolo Bonzini
    QTAILQ_FOREACH(block, &ram_list.blocks, next)
1139 d17b5288 Alex Williamson
        last = MAX(last, block->offset + block->length);
1140 d17b5288 Alex Williamson
1141 d17b5288 Alex Williamson
    return last;
1142 d17b5288 Alex Williamson
}
1143 d17b5288 Alex Williamson
1144 ddb97f1d Jason Baron
static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1145 ddb97f1d Jason Baron
{
1146 ddb97f1d Jason Baron
    int ret;
1147 ddb97f1d Jason Baron
1148 ddb97f1d Jason Baron
    /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core dump */
1149 2ff3de68 Markus Armbruster
    if (!qemu_opt_get_bool(qemu_get_machine_opts(),
1150 2ff3de68 Markus Armbruster
                           "dump-guest-core", true)) {
1151 ddb97f1d Jason Baron
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1152 ddb97f1d Jason Baron
        if (ret) {
1153 ddb97f1d Jason Baron
            perror("qemu_madvise");
1154 ddb97f1d Jason Baron
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1155 ddb97f1d Jason Baron
                            "but dump_guest_core=off specified\n");
1156 ddb97f1d Jason Baron
        }
1157 ddb97f1d Jason Baron
    }
1158 ddb97f1d Jason Baron
}
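/*
 * Illustration: starting QEMU with "-machine dump-guest-core=off" makes
 * the qemu_opt_get_bool() call above return false, so guest RAM is
 * marked QEMU_MADV_DONTDUMP and excluded from QEMU's own core dumps.
 */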
1159 ddb97f1d Jason Baron
1160 c5705a77 Avi Kivity
void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1161 84b89d78 Cam Macdonell
{
1162 84b89d78 Cam Macdonell
    RAMBlock *new_block, *block;
1163 84b89d78 Cam Macdonell
1164 c5705a77 Avi Kivity
    new_block = NULL;
1165 a3161038 Paolo Bonzini
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1166 c5705a77 Avi Kivity
        if (block->offset == addr) {
1167 c5705a77 Avi Kivity
            new_block = block;
1168 c5705a77 Avi Kivity
            break;
1169 c5705a77 Avi Kivity
        }
1170 c5705a77 Avi Kivity
    }
1171 c5705a77 Avi Kivity
    assert(new_block);
1172 c5705a77 Avi Kivity
    assert(!new_block->idstr[0]);
1173 84b89d78 Cam Macdonell
1174 09e5ab63 Anthony Liguori
    if (dev) {
1175 09e5ab63 Anthony Liguori
        char *id = qdev_get_dev_path(dev);
1176 84b89d78 Cam Macdonell
        if (id) {
1177 84b89d78 Cam Macdonell
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
1178 7267c094 Anthony Liguori
            g_free(id);
1179 84b89d78 Cam Macdonell
        }
1180 84b89d78 Cam Macdonell
    }
1181 84b89d78 Cam Macdonell
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1182 84b89d78 Cam Macdonell
1183 b2a8658e Umesh Deshpande
    /* This assumes the iothread lock is taken here too.  */
1184 b2a8658e Umesh Deshpande
    qemu_mutex_lock_ramlist();
1185 a3161038 Paolo Bonzini
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1186 c5705a77 Avi Kivity
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
1187 84b89d78 Cam Macdonell
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1188 84b89d78 Cam Macdonell
                    new_block->idstr);
1189 84b89d78 Cam Macdonell
            abort();
1190 84b89d78 Cam Macdonell
        }
1191 84b89d78 Cam Macdonell
    }
1192 b2a8658e Umesh Deshpande
    qemu_mutex_unlock_ramlist();
1193 c5705a77 Avi Kivity
}
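/*
 * Illustration with a hypothetical device path: if qdev_get_dev_path()
 * returns "DEV" for the owning device and the block is named "name",
 * the idstr becomes "DEV/name"; with no device (or no path) it is just
 * "name".  Duplicates abort above because migration matches RAM blocks
 * between source and destination by this string.
 */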
1194 c5705a77 Avi Kivity
1195 8490fc78 Luiz Capitulino
static int memory_try_enable_merging(void *addr, size_t len)
1196 8490fc78 Luiz Capitulino
{
1197 2ff3de68 Markus Armbruster
    if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
1198 8490fc78 Luiz Capitulino
        /* disabled by the user */
1199 8490fc78 Luiz Capitulino
        return 0;
1200 8490fc78 Luiz Capitulino
    }
1201 8490fc78 Luiz Capitulino
1202 8490fc78 Luiz Capitulino
    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1203 8490fc78 Luiz Capitulino
}
1204 8490fc78 Luiz Capitulino
1205 c5705a77 Avi Kivity
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1206 c5705a77 Avi Kivity
                                   MemoryRegion *mr)
1207 c5705a77 Avi Kivity
{
1208 abb26d63 Paolo Bonzini
    RAMBlock *block, *new_block;
1209 2152f5ca Juan Quintela
    ram_addr_t old_ram_size, new_ram_size;
1210 2152f5ca Juan Quintela
1211 2152f5ca Juan Quintela
    old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1212 c5705a77 Avi Kivity
1213 c5705a77 Avi Kivity
    size = TARGET_PAGE_ALIGN(size);
1214 c5705a77 Avi Kivity
    new_block = g_malloc0(sizeof(*new_block));
1215 3435f395 Markus Armbruster
    new_block->fd = -1;
1216 84b89d78 Cam Macdonell
1217 b2a8658e Umesh Deshpande
    /* This assumes the iothread lock is taken here too.  */
1218 b2a8658e Umesh Deshpande
    qemu_mutex_lock_ramlist();
1219 7c637366 Avi Kivity
    new_block->mr = mr;
1220 432d268c Jun Nakajima
    new_block->offset = find_ram_offset(size);
1221 6977dfe6 Yoshiaki Tamura
    if (host) {
1222 6977dfe6 Yoshiaki Tamura
        new_block->host = host;
1223 cd19cfa2 Huang Ying
        new_block->flags |= RAM_PREALLOC_MASK;
1224 dfeaf2ab Markus Armbruster
    } else if (xen_enabled()) {
1225 dfeaf2ab Markus Armbruster
        if (mem_path) {
1226 dfeaf2ab Markus Armbruster
            fprintf(stderr, "-mem-path not supported with Xen\n");
1227 dfeaf2ab Markus Armbruster
            exit(1);
1228 dfeaf2ab Markus Armbruster
        }
1229 dfeaf2ab Markus Armbruster
        xen_ram_alloc(new_block->offset, size, mr);
1230 6977dfe6 Yoshiaki Tamura
    } else {
1231 6977dfe6 Yoshiaki Tamura
        if (mem_path) {
1232 e1e84ba0 Markus Armbruster
            if (phys_mem_alloc != qemu_anon_ram_alloc) {
1233 e1e84ba0 Markus Armbruster
                /*
1234 e1e84ba0 Markus Armbruster
                 * file_ram_alloc() needs to allocate just like
1235 e1e84ba0 Markus Armbruster
                 * phys_mem_alloc, but we haven't bothered to provide
1236 e1e84ba0 Markus Armbruster
                 * a hook there.
1237 e1e84ba0 Markus Armbruster
                 */
1238 e1e84ba0 Markus Armbruster
                fprintf(stderr,
1239 e1e84ba0 Markus Armbruster
                        "-mem-path not supported with this accelerator\n");
1240 e1e84ba0 Markus Armbruster
                exit(1);
1241 e1e84ba0 Markus Armbruster
            }
1242 6977dfe6 Yoshiaki Tamura
            new_block->host = file_ram_alloc(new_block, size, mem_path);
1243 0628c182 Markus Armbruster
        }
1244 0628c182 Markus Armbruster
        if (!new_block->host) {
1245 91138037 Markus Armbruster
            new_block->host = phys_mem_alloc(size);
1246 39228250 Markus Armbruster
            if (!new_block->host) {
1247 39228250 Markus Armbruster
                fprintf(stderr, "Cannot set up guest memory '%s': %s\n",
1248 39228250 Markus Armbruster
                        new_block->mr->name, strerror(errno));
1249 39228250 Markus Armbruster
                exit(1);
1250 39228250 Markus Armbruster
            }
1251 8490fc78 Luiz Capitulino
            memory_try_enable_merging(new_block->host, size);
1252 6977dfe6 Yoshiaki Tamura
        }
1253 c902760f Marcelo Tosatti
    }
1254 94a6b54f pbrook
    new_block->length = size;
1255 94a6b54f pbrook
1256 abb26d63 Paolo Bonzini
    /* Keep the list sorted from biggest to smallest block.  */
1257 abb26d63 Paolo Bonzini
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1258 abb26d63 Paolo Bonzini
        if (block->length < new_block->length) {
1259 abb26d63 Paolo Bonzini
            break;
1260 abb26d63 Paolo Bonzini
        }
1261 abb26d63 Paolo Bonzini
    }
1262 abb26d63 Paolo Bonzini
    if (block) {
1263 abb26d63 Paolo Bonzini
        QTAILQ_INSERT_BEFORE(block, new_block, next);
1264 abb26d63 Paolo Bonzini
    } else {
1265 abb26d63 Paolo Bonzini
        QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
1266 abb26d63 Paolo Bonzini
    }
1267 0d6d3c87 Paolo Bonzini
    ram_list.mru_block = NULL;
1268 94a6b54f pbrook
1269 f798b07f Umesh Deshpande
    ram_list.version++;
1270 b2a8658e Umesh Deshpande
    qemu_mutex_unlock_ramlist();
1271 f798b07f Umesh Deshpande
1272 2152f5ca Juan Quintela
    new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1273 2152f5ca Juan Quintela
1274 2152f5ca Juan Quintela
    if (new_ram_size > old_ram_size) {
1275 1ab4c8ce Juan Quintela
        int i;
1276 1ab4c8ce Juan Quintela
        for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1277 1ab4c8ce Juan Quintela
            ram_list.dirty_memory[i] =
1278 1ab4c8ce Juan Quintela
                bitmap_zero_extend(ram_list.dirty_memory[i],
1279 1ab4c8ce Juan Quintela
                                   old_ram_size, new_ram_size);
1280 1ab4c8ce Juan Quintela
       }
1281 2152f5ca Juan Quintela
    }
1282 75218e7f Juan Quintela
    cpu_physical_memory_set_dirty_range(new_block->offset, size);
1283 94a6b54f pbrook
1284 ddb97f1d Jason Baron
    qemu_ram_setup_dump(new_block->host, size);
1285 ad0b5321 Luiz Capitulino
    qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);
1286 3e469dbf Andrea Arcangeli
    qemu_madvise(new_block->host, size, QEMU_MADV_DONTFORK);
1287 ddb97f1d Jason Baron
1288 6f0437e8 Jan Kiszka
    if (kvm_enabled())
1289 6f0437e8 Jan Kiszka
        kvm_setup_guest_memory(new_block->host, size);
1290 6f0437e8 Jan Kiszka
1291 94a6b54f pbrook
    return new_block->offset;
1292 94a6b54f pbrook
}
1293 e9a1ab19 bellard
1294 c5705a77 Avi Kivity
ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
1295 6977dfe6 Yoshiaki Tamura
{
1296 c5705a77 Avi Kivity
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
1297 6977dfe6 Yoshiaki Tamura
}
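/*
 * Minimal usage sketch, illustration only, assuming an already
 * initialized MemoryRegion *mr:
 *
 *     ram_addr_t offset = qemu_ram_alloc(16 << 20, mr);
 *     uint8_t *host = qemu_get_ram_ptr(offset);   // host view of guest RAM
 *     memset(host, 0, 16 << 20);
 *
 * Device code normally reaches this path indirectly via
 * memory_region_init_ram() rather than calling qemu_ram_alloc() itself.
 */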
1298 6977dfe6 Yoshiaki Tamura
1299 1f2e98b6 Alex Williamson
void qemu_ram_free_from_ptr(ram_addr_t addr)
1300 1f2e98b6 Alex Williamson
{
1301 1f2e98b6 Alex Williamson
    RAMBlock *block;
1302 1f2e98b6 Alex Williamson
1303 b2a8658e Umesh Deshpande
    /* This assumes the iothread lock is taken here too.  */
1304 b2a8658e Umesh Deshpande
    qemu_mutex_lock_ramlist();
1305 a3161038 Paolo Bonzini
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1306 1f2e98b6 Alex Williamson
        if (addr == block->offset) {
1307 a3161038 Paolo Bonzini
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
1308 0d6d3c87 Paolo Bonzini
            ram_list.mru_block = NULL;
1309 f798b07f Umesh Deshpande
            ram_list.version++;
1310 7267c094 Anthony Liguori
            g_free(block);
1311 b2a8658e Umesh Deshpande
            break;
1312 1f2e98b6 Alex Williamson
        }
1313 1f2e98b6 Alex Williamson
    }
1314 b2a8658e Umesh Deshpande
    qemu_mutex_unlock_ramlist();
1315 1f2e98b6 Alex Williamson
}
1316 1f2e98b6 Alex Williamson
1317 c227f099 Anthony Liguori
void qemu_ram_free(ram_addr_t addr)
1318 e9a1ab19 bellard
{
1319 04b16653 Alex Williamson
    RAMBlock *block;
1320 04b16653 Alex Williamson
1321 b2a8658e Umesh Deshpande
    /* This assumes the iothread lock is taken here too.  */
1322 b2a8658e Umesh Deshpande
    qemu_mutex_lock_ramlist();
1323 a3161038 Paolo Bonzini
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1324 04b16653 Alex Williamson
        if (addr == block->offset) {
1325 a3161038 Paolo Bonzini
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
1326 0d6d3c87 Paolo Bonzini
            ram_list.mru_block = NULL;
1327 f798b07f Umesh Deshpande
            ram_list.version++;
1328 cd19cfa2 Huang Ying
            if (block->flags & RAM_PREALLOC_MASK) {
1329 cd19cfa2 Huang Ying
                ;
1330 dfeaf2ab Markus Armbruster
            } else if (xen_enabled()) {
1331 dfeaf2ab Markus Armbruster
                xen_invalidate_map_cache_entry(block->host);
1332 089f3f76 Stefan Weil
#ifndef _WIN32
1333 3435f395 Markus Armbruster
            } else if (block->fd >= 0) {
1334 3435f395 Markus Armbruster
                munmap(block->host, block->length);
1335 3435f395 Markus Armbruster
                close(block->fd);
1336 089f3f76 Stefan Weil
#endif
1337 04b16653 Alex Williamson
            } else {
1338 dfeaf2ab Markus Armbruster
                qemu_anon_ram_free(block->host, block->length);
1339 04b16653 Alex Williamson
            }
1340 7267c094 Anthony Liguori
            g_free(block);
1341 b2a8658e Umesh Deshpande
            break;
1342 04b16653 Alex Williamson
        }
1343 04b16653 Alex Williamson
    }
1344 b2a8658e Umesh Deshpande
    qemu_mutex_unlock_ramlist();
1345 04b16653 Alex Williamson
1346 e9a1ab19 bellard
}
1347 e9a1ab19 bellard
1348 cd19cfa2 Huang Ying
#ifndef _WIN32
1349 cd19cfa2 Huang Ying
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1350 cd19cfa2 Huang Ying
{
1351 cd19cfa2 Huang Ying
    RAMBlock *block;
1352 cd19cfa2 Huang Ying
    ram_addr_t offset;
1353 cd19cfa2 Huang Ying
    int flags;
1354 cd19cfa2 Huang Ying
    void *area, *vaddr;
1355 cd19cfa2 Huang Ying
1356 a3161038 Paolo Bonzini
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1357 cd19cfa2 Huang Ying
        offset = addr - block->offset;
1358 cd19cfa2 Huang Ying
        if (offset < block->length) {
1359 cd19cfa2 Huang Ying
            vaddr = block->host + offset;
1360 cd19cfa2 Huang Ying
            if (block->flags & RAM_PREALLOC_MASK) {
1361 cd19cfa2 Huang Ying
                ;
1362 dfeaf2ab Markus Armbruster
            } else if (xen_enabled()) {
1363 dfeaf2ab Markus Armbruster
                abort();
1364 cd19cfa2 Huang Ying
            } else {
1365 cd19cfa2 Huang Ying
                flags = MAP_FIXED;
1366 cd19cfa2 Huang Ying
                munmap(vaddr, length);
1367 3435f395 Markus Armbruster
                if (block->fd >= 0) {
1368 cd19cfa2 Huang Ying
#ifdef MAP_POPULATE
1369 3435f395 Markus Armbruster
                    flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
1370 3435f395 Markus Armbruster
                        MAP_PRIVATE;
1371 fd28aa13 Jan Kiszka
#else
1372 3435f395 Markus Armbruster
                    flags |= MAP_PRIVATE;
1373 cd19cfa2 Huang Ying
#endif
1374 3435f395 Markus Armbruster
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1375 3435f395 Markus Armbruster
                                flags, block->fd, offset);
1376 cd19cfa2 Huang Ying
                } else {
1377 2eb9fbaa Markus Armbruster
                    /*
1378 2eb9fbaa Markus Armbruster
                     * Remap needs to match alloc.  Accelerators that
1379 2eb9fbaa Markus Armbruster
                     * set phys_mem_alloc never remap.  If they did,
1380 2eb9fbaa Markus Armbruster
                     * we'd need a remap hook here.
1381 2eb9fbaa Markus Armbruster
                     */
1382 2eb9fbaa Markus Armbruster
                    assert(phys_mem_alloc == qemu_anon_ram_alloc);
1383 2eb9fbaa Markus Armbruster
1384 cd19cfa2 Huang Ying
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1385 cd19cfa2 Huang Ying
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1386 cd19cfa2 Huang Ying
                                flags, -1, 0);
1387 cd19cfa2 Huang Ying
                }
1388 cd19cfa2 Huang Ying
                if (area != vaddr) {
1389 f15fbc4b Anthony PERARD
                    fprintf(stderr, "Could not remap addr: "
1390 f15fbc4b Anthony PERARD
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
1391 cd19cfa2 Huang Ying
                            length, addr);
1392 cd19cfa2 Huang Ying
                    exit(1);
1393 cd19cfa2 Huang Ying
                }
1394 8490fc78 Luiz Capitulino
                memory_try_enable_merging(vaddr, length);
1395 ddb97f1d Jason Baron
                qemu_ram_setup_dump(vaddr, length);
1396 cd19cfa2 Huang Ying
            }
1397 cd19cfa2 Huang Ying
            return;
1398 cd19cfa2 Huang Ying
        }
1399 cd19cfa2 Huang Ying
    }
1400 cd19cfa2 Huang Ying
}
1401 cd19cfa2 Huang Ying
#endif /* !_WIN32 */
1402 cd19cfa2 Huang Ying
1403 1b5ec234 Paolo Bonzini
/* Return a host pointer to ram allocated with qemu_ram_alloc.
1404 1b5ec234 Paolo Bonzini
   With the exception of the softmmu code in this file, this should
1405 1b5ec234 Paolo Bonzini
   only be used for local memory (e.g. video ram) that the device owns,
1406 1b5ec234 Paolo Bonzini
   and knows it isn't going to access beyond the end of the block.
1407 1b5ec234 Paolo Bonzini

1408 1b5ec234 Paolo Bonzini
   It should not be used for general purpose DMA.
1409 1b5ec234 Paolo Bonzini
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1410 1b5ec234 Paolo Bonzini
 */
1411 1b5ec234 Paolo Bonzini
void *qemu_get_ram_ptr(ram_addr_t addr)
1412 1b5ec234 Paolo Bonzini
{
1413 1b5ec234 Paolo Bonzini
    RAMBlock *block = qemu_get_ram_block(addr);
1414 1b5ec234 Paolo Bonzini
1415 0d6d3c87 Paolo Bonzini
    if (xen_enabled()) {
1416 0d6d3c87 Paolo Bonzini
        /* We need to check if the requested address is in the RAM
1417 0d6d3c87 Paolo Bonzini
         * because we don't want to map the entire memory in QEMU.
1418 0d6d3c87 Paolo Bonzini
         * In that case just map until the end of the page.
1419 0d6d3c87 Paolo Bonzini
         */
1420 0d6d3c87 Paolo Bonzini
        if (block->offset == 0) {
1421 0d6d3c87 Paolo Bonzini
            return xen_map_cache(addr, 0, 0);
1422 0d6d3c87 Paolo Bonzini
        } else if (block->host == NULL) {
1423 0d6d3c87 Paolo Bonzini
            block->host =
1424 0d6d3c87 Paolo Bonzini
                xen_map_cache(block->offset, block->length, 1);
1425 0d6d3c87 Paolo Bonzini
        }
1426 0d6d3c87 Paolo Bonzini
    }
1427 0d6d3c87 Paolo Bonzini
    return block->host + (addr - block->offset);
1428 dc828ca1 pbrook
}
1429 dc828ca1 pbrook
1430 38bee5dc Stefano Stabellini
/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1431 38bee5dc Stefano Stabellini
 * but takes a size argument */
1432 cb85f7ab Peter Maydell
static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
1433 38bee5dc Stefano Stabellini
{
1434 8ab934f9 Stefano Stabellini
    if (*size == 0) {
1435 8ab934f9 Stefano Stabellini
        return NULL;
1436 8ab934f9 Stefano Stabellini
    }
1437 868bb33f Jan Kiszka
    if (xen_enabled()) {
1438 e41d7c69 Jan Kiszka
        return xen_map_cache(addr, *size, 1);
1439 868bb33f Jan Kiszka
    } else {
1440 38bee5dc Stefano Stabellini
        RAMBlock *block;
1441 38bee5dc Stefano Stabellini
1442 a3161038 Paolo Bonzini
        QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1443 38bee5dc Stefano Stabellini
            if (addr - block->offset < block->length) {
1444 38bee5dc Stefano Stabellini
                if (addr - block->offset + *size > block->length)
1445 38bee5dc Stefano Stabellini
                    *size = block->length - addr + block->offset;
1446 38bee5dc Stefano Stabellini
                return block->host + (addr - block->offset);
1447 38bee5dc Stefano Stabellini
            }
1448 38bee5dc Stefano Stabellini
        }
1449 38bee5dc Stefano Stabellini
1450 38bee5dc Stefano Stabellini
        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1451 38bee5dc Stefano Stabellini
        abort();
1452 38bee5dc Stefano Stabellini
    }
1453 38bee5dc Stefano Stabellini
}
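/*
 * Illustration: if addr points 0x100 bytes before the end of its block
 * and *size is 0x1000 on entry, *size is clamped to 0x100, so the
 * caller never receives a pointer that spans past one RAMBlock.
 */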
1454 38bee5dc Stefano Stabellini
1455 7443b437 Paolo Bonzini
/* Some of the softmmu routines need to translate from a host pointer
1456 7443b437 Paolo Bonzini
   (typically a TLB entry) back to a ram offset.  */
1457 1b5ec234 Paolo Bonzini
MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
1458 5579c7f3 pbrook
{
1459 94a6b54f pbrook
    RAMBlock *block;
1460 94a6b54f pbrook
    uint8_t *host = ptr;
1461 94a6b54f pbrook
1462 868bb33f Jan Kiszka
    if (xen_enabled()) {
1463 e41d7c69 Jan Kiszka
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
1464 1b5ec234 Paolo Bonzini
        return qemu_get_ram_block(*ram_addr)->mr;
1465 712c2b41 Stefano Stabellini
    }
1466 712c2b41 Stefano Stabellini
1467 23887b79 Paolo Bonzini
    block = ram_list.mru_block;
1468 23887b79 Paolo Bonzini
    if (block && block->host && host - block->host < block->length) {
1469 23887b79 Paolo Bonzini
        goto found;
1470 23887b79 Paolo Bonzini
    }
1471 23887b79 Paolo Bonzini
1472 a3161038 Paolo Bonzini
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1473 432d268c Jun Nakajima
        /* This case happens when the block is not mapped. */
1474 432d268c Jun Nakajima
        if (block->host == NULL) {
1475 432d268c Jun Nakajima
            continue;
1476 432d268c Jun Nakajima
        }
1477 f471a17e Alex Williamson
        if (host - block->host < block->length) {
1478 23887b79 Paolo Bonzini
            goto found;
1479 f471a17e Alex Williamson
        }
1480 94a6b54f pbrook
    }
1481 432d268c Jun Nakajima
1482 1b5ec234 Paolo Bonzini
    return NULL;
1483 23887b79 Paolo Bonzini
1484 23887b79 Paolo Bonzini
found:
1485 23887b79 Paolo Bonzini
    *ram_addr = block->offset + (host - block->host);
1486 1b5ec234 Paolo Bonzini
    return block->mr;
1487 e890261f Marcelo Tosatti
}
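/*
 * Usage sketch, illustration only: translating a host pointer "ptr"
 * (e.g. recovered from a TLB entry) back to its ram_addr_t:
 *
 *     ram_addr_t ram_addr;
 *     MemoryRegion *mr = qemu_ram_addr_from_host(ptr, &ram_addr);
 *     if (!mr) {
 *         // ptr does not point into guest RAM
 *     }
 */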
1488 f471a17e Alex Williamson
1489 a8170e5e Avi Kivity
static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
1490 0e0df1e2 Avi Kivity
                               uint64_t val, unsigned size)
1491 9fa3e853 bellard
{
1492 52159192 Juan Quintela
    if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
1493 0e0df1e2 Avi Kivity
        tb_invalidate_phys_page_fast(ram_addr, size);
1494 3a7d929e bellard
    }
1495 0e0df1e2 Avi Kivity
    switch (size) {
1496 0e0df1e2 Avi Kivity
    case 1:
1497 0e0df1e2 Avi Kivity
        stb_p(qemu_get_ram_ptr(ram_addr), val);
1498 0e0df1e2 Avi Kivity
        break;
1499 0e0df1e2 Avi Kivity
    case 2:
1500 0e0df1e2 Avi Kivity
        stw_p(qemu_get_ram_ptr(ram_addr), val);
1501 0e0df1e2 Avi Kivity
        break;
1502 0e0df1e2 Avi Kivity
    case 4:
1503 0e0df1e2 Avi Kivity
        stl_p(qemu_get_ram_ptr(ram_addr), val);
1504 0e0df1e2 Avi Kivity
        break;
1505 0e0df1e2 Avi Kivity
    default:
1506 0e0df1e2 Avi Kivity
        abort();
1507 3a7d929e bellard
    }
1508 52159192 Juan Quintela
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_MIGRATION);
1509 52159192 Juan Quintela
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_VGA);
1510 f23db169 bellard
    /* we remove the notdirty callback only if the code has been
1511 f23db169 bellard
       flushed */
1512 a2cd8c85 Juan Quintela
    if (!cpu_physical_memory_is_clean(ram_addr)) {
1513 4917cf44 Andreas Färber
        CPUArchState *env = current_cpu->env_ptr;
1514 4917cf44 Andreas Färber
        tlb_set_dirty(env, env->mem_io_vaddr);
1515 4917cf44 Andreas Färber
    }
1516 9fa3e853 bellard
}
1517 9fa3e853 bellard
1518 b018ddf6 Paolo Bonzini
static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1519 b018ddf6 Paolo Bonzini
                                 unsigned size, bool is_write)
1520 b018ddf6 Paolo Bonzini
{
1521 b018ddf6 Paolo Bonzini
    return is_write;
1522 b018ddf6 Paolo Bonzini
}
1523 b018ddf6 Paolo Bonzini
1524 0e0df1e2 Avi Kivity
static const MemoryRegionOps notdirty_mem_ops = {
1525 0e0df1e2 Avi Kivity
    .write = notdirty_mem_write,
1526 b018ddf6 Paolo Bonzini
    .valid.accepts = notdirty_mem_accepts,
1527 0e0df1e2 Avi Kivity
    .endianness = DEVICE_NATIVE_ENDIAN,
1528 1ccde1cb bellard
};
1529 1ccde1cb bellard
1530 0f459d16 pbrook
/* Generate a debug exception if a watchpoint has been hit.  */
1531 b4051334 aliguori
static void check_watchpoint(int offset, int len_mask, int flags)
1532 0f459d16 pbrook
{
1533 4917cf44 Andreas Färber
    CPUArchState *env = current_cpu->env_ptr;
1534 06d55cc1 aliguori
    target_ulong pc, cs_base;
1535 0f459d16 pbrook
    target_ulong vaddr;
1536 a1d1bb31 aliguori
    CPUWatchpoint *wp;
1537 06d55cc1 aliguori
    int cpu_flags;
1538 0f459d16 pbrook
1539 06d55cc1 aliguori
    if (env->watchpoint_hit) {
1540 06d55cc1 aliguori
        /* We re-entered the check after replacing the TB. Now raise
1541 06d55cc1 aliguori
         * the debug interrupt so that it will trigger after the
1542 06d55cc1 aliguori
         * current instruction. */
1543 c3affe56 Andreas Färber
        cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
1544 06d55cc1 aliguori
        return;
1545 06d55cc1 aliguori
    }
1546 2e70f6ef pbrook
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
1547 72cf2d4f Blue Swirl
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1548 b4051334 aliguori
        if ((vaddr == (wp->vaddr & len_mask) ||
1549 b4051334 aliguori
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
1550 6e140f28 aliguori
            wp->flags |= BP_WATCHPOINT_HIT;
1551 6e140f28 aliguori
            if (!env->watchpoint_hit) {
1552 6e140f28 aliguori
                env->watchpoint_hit = wp;
1553 5a316526 Blue Swirl
                tb_check_watchpoint(env);
1554 6e140f28 aliguori
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
1555 6e140f28 aliguori
                    env->exception_index = EXCP_DEBUG;
1556 488d6577 Max Filippov
                    cpu_loop_exit(env);
1557 6e140f28 aliguori
                } else {
1558 6e140f28 aliguori
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
1559 6e140f28 aliguori
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
1560 488d6577 Max Filippov
                    cpu_resume_from_signal(env, NULL);
1561 6e140f28 aliguori
                }
1562 06d55cc1 aliguori
            }
1563 6e140f28 aliguori
        } else {
1564 6e140f28 aliguori
            wp->flags &= ~BP_WATCHPOINT_HIT;
1565 0f459d16 pbrook
        }
1566 0f459d16 pbrook
    }
1567 0f459d16 pbrook
}
1568 0f459d16 pbrook
1569 6658ffb8 pbrook
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
1570 6658ffb8 pbrook
   so these check for a hit then pass through to the normal out-of-line
1571 6658ffb8 pbrook
   phys routines.  */
1572 a8170e5e Avi Kivity
static uint64_t watch_mem_read(void *opaque, hwaddr addr,
1573 1ec9b909 Avi Kivity
                               unsigned size)
1574 6658ffb8 pbrook
{
1575 1ec9b909 Avi Kivity
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
1576 1ec9b909 Avi Kivity
    switch (size) {
1577 1ec9b909 Avi Kivity
    case 1: return ldub_phys(addr);
1578 1ec9b909 Avi Kivity
    case 2: return lduw_phys(addr);
1579 1ec9b909 Avi Kivity
    case 4: return ldl_phys(addr);
1580 1ec9b909 Avi Kivity
    default: abort();
1581 1ec9b909 Avi Kivity
    }
1582 6658ffb8 pbrook
}
1583 6658ffb8 pbrook
1584 a8170e5e Avi Kivity
static void watch_mem_write(void *opaque, hwaddr addr,
1585 1ec9b909 Avi Kivity
                            uint64_t val, unsigned size)
1586 6658ffb8 pbrook
{
1587 1ec9b909 Avi Kivity
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
1588 1ec9b909 Avi Kivity
    switch (size) {
1589 67364150 Max Filippov
    case 1:
1590 67364150 Max Filippov
        stb_phys(addr, val);
1591 67364150 Max Filippov
        break;
1592 67364150 Max Filippov
    case 2:
1593 67364150 Max Filippov
        stw_phys(addr, val);
1594 67364150 Max Filippov
        break;
1595 67364150 Max Filippov
    case 4:
1596 67364150 Max Filippov
        stl_phys(addr, val);
1597 67364150 Max Filippov
        break;
1598 1ec9b909 Avi Kivity
    default: abort();
1599 1ec9b909 Avi Kivity
    }
1600 6658ffb8 pbrook
}
1601 6658ffb8 pbrook
1602 1ec9b909 Avi Kivity
static const MemoryRegionOps watch_mem_ops = {
1603 1ec9b909 Avi Kivity
    .read = watch_mem_read,
1604 1ec9b909 Avi Kivity
    .write = watch_mem_write,
1605 1ec9b909 Avi Kivity
    .endianness = DEVICE_NATIVE_ENDIAN,
1606 6658ffb8 pbrook
};
1607 6658ffb8 pbrook
1608 a8170e5e Avi Kivity
static uint64_t subpage_read(void *opaque, hwaddr addr,
1609 70c68e44 Avi Kivity
                             unsigned len)
1610 db7b5426 blueswir1
{
1611 acc9d80b Jan Kiszka
    subpage_t *subpage = opaque;
1612 acc9d80b Jan Kiszka
    uint8_t buf[4];
1613 791af8c8 Paolo Bonzini
1614 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
1615 016e9d62 Amos Kong
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
1616 acc9d80b Jan Kiszka
           subpage, len, addr);
1617 db7b5426 blueswir1
#endif
1618 acc9d80b Jan Kiszka
    address_space_read(subpage->as, addr + subpage->base, buf, len);
1619 acc9d80b Jan Kiszka
    switch (len) {
1620 acc9d80b Jan Kiszka
    case 1:
1621 acc9d80b Jan Kiszka
        return ldub_p(buf);
1622 acc9d80b Jan Kiszka
    case 2:
1623 acc9d80b Jan Kiszka
        return lduw_p(buf);
1624 acc9d80b Jan Kiszka
    case 4:
1625 acc9d80b Jan Kiszka
        return ldl_p(buf);
1626 acc9d80b Jan Kiszka
    default:
1627 acc9d80b Jan Kiszka
        abort();
1628 acc9d80b Jan Kiszka
    }
1629 db7b5426 blueswir1
}
1630 db7b5426 blueswir1
1631 a8170e5e Avi Kivity
static void subpage_write(void *opaque, hwaddr addr,
1632 70c68e44 Avi Kivity
                          uint64_t value, unsigned len)
1633 db7b5426 blueswir1
{
1634 acc9d80b Jan Kiszka
    subpage_t *subpage = opaque;
1635 acc9d80b Jan Kiszka
    uint8_t buf[4];
1636 acc9d80b Jan Kiszka
1637 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
1638 016e9d62 Amos Kong
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx
1639 acc9d80b Jan Kiszka
           " value %"PRIx64"\n",
1640 acc9d80b Jan Kiszka
           __func__, subpage, len, addr, value);
1641 db7b5426 blueswir1
#endif
1642 acc9d80b Jan Kiszka
    switch (len) {
1643 acc9d80b Jan Kiszka
    case 1:
1644 acc9d80b Jan Kiszka
        stb_p(buf, value);
1645 acc9d80b Jan Kiszka
        break;
1646 acc9d80b Jan Kiszka
    case 2:
1647 acc9d80b Jan Kiszka
        stw_p(buf, value);
1648 acc9d80b Jan Kiszka
        break;
1649 acc9d80b Jan Kiszka
    case 4:
1650 acc9d80b Jan Kiszka
        stl_p(buf, value);
1651 acc9d80b Jan Kiszka
        break;
1652 acc9d80b Jan Kiszka
    default:
1653 acc9d80b Jan Kiszka
        abort();
1654 acc9d80b Jan Kiszka
    }
1655 acc9d80b Jan Kiszka
    address_space_write(subpage->as, addr + subpage->base, buf, len);
1656 db7b5426 blueswir1
}
1657 db7b5426 blueswir1
1658 c353e4cc Paolo Bonzini
static bool subpage_accepts(void *opaque, hwaddr addr,
1659 016e9d62 Amos Kong
                            unsigned len, bool is_write)
1660 c353e4cc Paolo Bonzini
{
1661 acc9d80b Jan Kiszka
    subpage_t *subpage = opaque;
1662 c353e4cc Paolo Bonzini
#if defined(DEBUG_SUBPAGE)
1663 016e9d62 Amos Kong
    printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
1664 acc9d80b Jan Kiszka
           __func__, subpage, is_write ? 'w' : 'r', len, addr);
1665 c353e4cc Paolo Bonzini
#endif
1666 c353e4cc Paolo Bonzini
1667 acc9d80b Jan Kiszka
    return address_space_access_valid(subpage->as, addr + subpage->base,
1668 016e9d62 Amos Kong
                                      len, is_write);
1669 c353e4cc Paolo Bonzini
}
1670 c353e4cc Paolo Bonzini
1671 70c68e44 Avi Kivity
static const MemoryRegionOps subpage_ops = {
1672 70c68e44 Avi Kivity
    .read = subpage_read,
1673 70c68e44 Avi Kivity
    .write = subpage_write,
1674 c353e4cc Paolo Bonzini
    .valid.accepts = subpage_accepts,
1675 70c68e44 Avi Kivity
    .endianness = DEVICE_NATIVE_ENDIAN,
1676 db7b5426 blueswir1
};
1677 db7b5426 blueswir1
1678 c227f099 Anthony Liguori
static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
1679 5312bd8b Avi Kivity
                            uint16_t section)
1680 db7b5426 blueswir1
{
1681 db7b5426 blueswir1
    int idx, eidx;
1682 db7b5426 blueswir1
1683 db7b5426 blueswir1
    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1684 db7b5426 blueswir1
        return -1;
1685 db7b5426 blueswir1
    idx = SUBPAGE_IDX(start);
1686 db7b5426 blueswir1
    eidx = SUBPAGE_IDX(end);
1687 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
1688 016e9d62 Amos Kong
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
1689 016e9d62 Amos Kong
           __func__, mmio, start, end, idx, eidx, section);
1690 db7b5426 blueswir1
#endif
1691 db7b5426 blueswir1
    for (; idx <= eidx; idx++) {
1692 5312bd8b Avi Kivity
        mmio->sub_section[idx] = section;
1693 db7b5426 blueswir1
    }
1694 db7b5426 blueswir1
1695 db7b5426 blueswir1
    return 0;
1696 db7b5426 blueswir1
}
1697 db7b5426 blueswir1
1698 acc9d80b Jan Kiszka
static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
1699 db7b5426 blueswir1
{
1700 c227f099 Anthony Liguori
    subpage_t *mmio;
1701 db7b5426 blueswir1
1702 7267c094 Anthony Liguori
    mmio = g_malloc0(sizeof(subpage_t));
1703 1eec614b aliguori
1704 acc9d80b Jan Kiszka
    mmio->as = as;
1705 1eec614b aliguori
    mmio->base = base;
1706 2c9b15ca Paolo Bonzini
    memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
1707 70c68e44 Avi Kivity
                          "subpage", TARGET_PAGE_SIZE);
1708 b3b00c78 Avi Kivity
    mmio->iomem.subpage = true;
1709 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
1710 016e9d62 Amos Kong
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
1711 016e9d62 Amos Kong
           mmio, base, TARGET_PAGE_SIZE);
1712 db7b5426 blueswir1
#endif
1713 b41aac4f Liu Ping Fan
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
1714 db7b5426 blueswir1
1715 db7b5426 blueswir1
    return mmio;
1716 db7b5426 blueswir1
}
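/*
 * Illustration: if two devices split one 4 KiB guest page, say A at
 * [base, base + 0x800) and B at [base + 0x800, base + 0x1000), a single
 * subpage_t covers the page; subpage_register() points the relevant
 * sub_section[] entries at A's and B's sections, and subpage_read/write
 * then re-dispatch each access through subpage->as at base + addr.
 */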
1717 db7b5426 blueswir1
1718 53cb28cb Marcel Apfelbaum
static uint16_t dummy_section(PhysPageMap *map, MemoryRegion *mr)
1719 5312bd8b Avi Kivity
{
1720 5312bd8b Avi Kivity
    MemoryRegionSection section = {
1721 5312bd8b Avi Kivity
        .mr = mr,
1722 5312bd8b Avi Kivity
        .offset_within_address_space = 0,
1723 5312bd8b Avi Kivity
        .offset_within_region = 0,
1724 052e87b0 Paolo Bonzini
        .size = int128_2_64(),
1725 5312bd8b Avi Kivity
    };
1726 5312bd8b Avi Kivity
1727 53cb28cb Marcel Apfelbaum
    return phys_section_add(map, &section);
1728 5312bd8b Avi Kivity
}
1729 5312bd8b Avi Kivity
1730 a8170e5e Avi Kivity
MemoryRegion *iotlb_to_region(hwaddr index)
1731 aa102231 Avi Kivity
{
1732 53cb28cb Marcel Apfelbaum
    return address_space_memory.dispatch->map.sections[
1733 53cb28cb Marcel Apfelbaum
           index & ~TARGET_PAGE_MASK].mr;
1734 aa102231 Avi Kivity
}
1735 aa102231 Avi Kivity
1736 e9179ce1 Avi Kivity
static void io_mem_init(void)
1737 e9179ce1 Avi Kivity
{
1738 2c9b15ca Paolo Bonzini
    memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, "rom", UINT64_MAX);
1739 2c9b15ca Paolo Bonzini
    memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
1740 0e0df1e2 Avi Kivity
                          "unassigned", UINT64_MAX);
1741 2c9b15ca Paolo Bonzini
    memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
1742 0e0df1e2 Avi Kivity
                          "notdirty", UINT64_MAX);
1743 2c9b15ca Paolo Bonzini
    memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
1744 1ec9b909 Avi Kivity
                          "watch", UINT64_MAX);
1745 e9179ce1 Avi Kivity
}
1746 e9179ce1 Avi Kivity
1747 ac1970fb Avi Kivity
static void mem_begin(MemoryListener *listener)
1748 ac1970fb Avi Kivity
{
1749 89ae337a Paolo Bonzini
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
1750 53cb28cb Marcel Apfelbaum
    AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
1751 53cb28cb Marcel Apfelbaum
    uint16_t n;
1752 53cb28cb Marcel Apfelbaum
1753 53cb28cb Marcel Apfelbaum
    n = dummy_section(&d->map, &io_mem_unassigned);
1754 53cb28cb Marcel Apfelbaum
    assert(n == PHYS_SECTION_UNASSIGNED);
1755 53cb28cb Marcel Apfelbaum
    n = dummy_section(&d->map, &io_mem_notdirty);
1756 53cb28cb Marcel Apfelbaum
    assert(n == PHYS_SECTION_NOTDIRTY);
1757 53cb28cb Marcel Apfelbaum
    n = dummy_section(&d->map, &io_mem_rom);
1758 53cb28cb Marcel Apfelbaum
    assert(n == PHYS_SECTION_ROM);
1759 53cb28cb Marcel Apfelbaum
    n = dummy_section(&d->map, &io_mem_watch);
1760 53cb28cb Marcel Apfelbaum
    assert(n == PHYS_SECTION_WATCH);
1761 00752703 Paolo Bonzini
1762 9736e55b Michael S. Tsirkin
    d->phys_map  = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
1763 00752703 Paolo Bonzini
    d->as = as;
1764 00752703 Paolo Bonzini
    as->next_dispatch = d;
1765 00752703 Paolo Bonzini
}
1766 00752703 Paolo Bonzini
1767 00752703 Paolo Bonzini
static void mem_commit(MemoryListener *listener)
1768 00752703 Paolo Bonzini
{
1769 00752703 Paolo Bonzini
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
1770 0475d94f Paolo Bonzini
    AddressSpaceDispatch *cur = as->dispatch;
1771 0475d94f Paolo Bonzini
    AddressSpaceDispatch *next = as->next_dispatch;
1772 0475d94f Paolo Bonzini
1773 53cb28cb Marcel Apfelbaum
    phys_page_compact_all(next, next->map.nodes_nb);
1774 b35ba30f Michael S. Tsirkin
1775 0475d94f Paolo Bonzini
    as->dispatch = next;
1776 b41aac4f Liu Ping Fan
1777 53cb28cb Marcel Apfelbaum
    if (cur) {
1778 53cb28cb Marcel Apfelbaum
        phys_sections_free(&cur->map);
1779 53cb28cb Marcel Apfelbaum
        g_free(cur);
1780 53cb28cb Marcel Apfelbaum
    }
1781 9affd6fc Paolo Bonzini
}
1782 9affd6fc Paolo Bonzini
1783 1d71148e Avi Kivity
static void tcg_commit(MemoryListener *listener)
1784 50c1e149 Avi Kivity
{
1785 182735ef Andreas Färber
    CPUState *cpu;
1786 117712c3 Avi Kivity
1787 117712c3 Avi Kivity
    /* since each CPU stores ram addresses in its TLB cache, we must
1788 117712c3 Avi Kivity
       reset the modified entries */
1789 117712c3 Avi Kivity
    /* XXX: slow! */
1790 bdc44640 Andreas Färber
    CPU_FOREACH(cpu) {
1791 182735ef Andreas Färber
        CPUArchState *env = cpu->env_ptr;
1792 182735ef Andreas Färber
1793 117712c3 Avi Kivity
        tlb_flush(env, 1);
1794 117712c3 Avi Kivity
    }
1795 50c1e149 Avi Kivity
}
1796 50c1e149 Avi Kivity
1797 93632747 Avi Kivity
static void core_log_global_start(MemoryListener *listener)
1798 93632747 Avi Kivity
{
1799 981fdf23 Juan Quintela
    cpu_physical_memory_set_dirty_tracking(true);
1800 93632747 Avi Kivity
}
1801 93632747 Avi Kivity
1802 93632747 Avi Kivity
static void core_log_global_stop(MemoryListener *listener)
1803 93632747 Avi Kivity
{
1804 981fdf23 Juan Quintela
    cpu_physical_memory_set_dirty_tracking(false);
1805 93632747 Avi Kivity
}
1806 93632747 Avi Kivity
1807 93632747 Avi Kivity
static MemoryListener core_memory_listener = {
1808 93632747 Avi Kivity
    .log_global_start = core_log_global_start,
1809 93632747 Avi Kivity
    .log_global_stop = core_log_global_stop,
1810 ac1970fb Avi Kivity
    .priority = 1,
1811 93632747 Avi Kivity
};
1812 93632747 Avi Kivity
1813 1d71148e Avi Kivity
static MemoryListener tcg_memory_listener = {
1814 1d71148e Avi Kivity
    .commit = tcg_commit,
1815 1d71148e Avi Kivity
};
1816 1d71148e Avi Kivity
1817 ac1970fb Avi Kivity
void address_space_init_dispatch(AddressSpace *as)
1818 ac1970fb Avi Kivity
{
1819 00752703 Paolo Bonzini
    as->dispatch = NULL;
1820 89ae337a Paolo Bonzini
    as->dispatch_listener = (MemoryListener) {
1821 ac1970fb Avi Kivity
        .begin = mem_begin,
1822 00752703 Paolo Bonzini
        .commit = mem_commit,
1823 ac1970fb Avi Kivity
        .region_add = mem_add,
1824 ac1970fb Avi Kivity
        .region_nop = mem_add,
1825 ac1970fb Avi Kivity
        .priority = 0,
1826 ac1970fb Avi Kivity
    };
1827 89ae337a Paolo Bonzini
    memory_listener_register(&as->dispatch_listener, as);
1828 ac1970fb Avi Kivity
}
1829 ac1970fb Avi Kivity
1830 83f3c251 Avi Kivity
void address_space_destroy_dispatch(AddressSpace *as)
1831 83f3c251 Avi Kivity
{
1832 83f3c251 Avi Kivity
    AddressSpaceDispatch *d = as->dispatch;
1833 83f3c251 Avi Kivity
1834 89ae337a Paolo Bonzini
    memory_listener_unregister(&as->dispatch_listener);
1835 83f3c251 Avi Kivity
    g_free(d);
1836 83f3c251 Avi Kivity
    as->dispatch = NULL;
1837 83f3c251 Avi Kivity
}
1838 83f3c251 Avi Kivity
1839 62152b8a Avi Kivity
static void memory_map_init(void)
1840 62152b8a Avi Kivity
{
1841 7267c094 Anthony Liguori
    system_memory = g_malloc(sizeof(*system_memory));
1842 03f49957 Paolo Bonzini
1843 57271d63 Paolo Bonzini
    memory_region_init(system_memory, NULL, "system", UINT64_MAX);
1844 7dca8043 Alexey Kardashevskiy
    address_space_init(&address_space_memory, system_memory, "memory");
1845 309cb471 Avi Kivity
1846 7267c094 Anthony Liguori
    system_io = g_malloc(sizeof(*system_io));
1847 3bb28b72 Jan Kiszka
    memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
1848 3bb28b72 Jan Kiszka
                          65536);
1849 7dca8043 Alexey Kardashevskiy
    address_space_init(&address_space_io, system_io, "I/O");
1850 93632747 Avi Kivity
1851 f6790af6 Avi Kivity
    memory_listener_register(&core_memory_listener, &address_space_memory);
1852 2641689a liguang
    if (tcg_enabled()) {
1853 2641689a liguang
        memory_listener_register(&tcg_memory_listener, &address_space_memory);
1854 2641689a liguang
    }
1855 62152b8a Avi Kivity
}
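/*
 * Illustration only, x86-flavoured: once memory_map_init() has run,
 * both global address spaces are usable, e.g.:
 *
 *     uint8_t idx = 0x12;
 *     address_space_write(&address_space_io, 0x70, &idx, 1);  // RTC/CMOS index port
 */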
1856 62152b8a Avi Kivity
1857 62152b8a Avi Kivity
MemoryRegion *get_system_memory(void)
1858 62152b8a Avi Kivity
{
1859 62152b8a Avi Kivity
    return system_memory;
1860 62152b8a Avi Kivity
}
1861 62152b8a Avi Kivity
1862 309cb471 Avi Kivity
MemoryRegion *get_system_io(void)
1863 309cb471 Avi Kivity
{
1864 309cb471 Avi Kivity
    return system_io;
1865 309cb471 Avi Kivity
}
1866 309cb471 Avi Kivity
1867 e2eef170 pbrook
#endif /* !defined(CONFIG_USER_ONLY) */
1868 e2eef170 pbrook
1869 13eb76e0 bellard
/* physical memory access (slow version, mainly for debug) */
1870 13eb76e0 bellard
#if defined(CONFIG_USER_ONLY)
1871 f17ec444 Andreas Färber
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
1872 a68fe89c Paul Brook
                        uint8_t *buf, int len, int is_write)
1873 13eb76e0 bellard
{
1874 13eb76e0 bellard
    int l, flags;
1875 13eb76e0 bellard
    target_ulong page;
1876 53a5960a pbrook
    void *p;
1877 13eb76e0 bellard
1878 13eb76e0 bellard
    while (len > 0) {
1879 13eb76e0 bellard
        page = addr & TARGET_PAGE_MASK;
1880 13eb76e0 bellard
        l = (page + TARGET_PAGE_SIZE) - addr;
1881 13eb76e0 bellard
        if (l > len)
1882 13eb76e0 bellard
            l = len;
1883 13eb76e0 bellard
        flags = page_get_flags(page);
1884 13eb76e0 bellard
        if (!(flags & PAGE_VALID))
1885 a68fe89c Paul Brook
            return -1;
1886 13eb76e0 bellard
        if (is_write) {
1887 13eb76e0 bellard
            if (!(flags & PAGE_WRITE))
1888 a68fe89c Paul Brook
                return -1;
1889 579a97f7 bellard
            /* XXX: this code should not depend on lock_user */
1890 72fb7daa aurel32
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
1891 a68fe89c Paul Brook
                return -1;
1892 72fb7daa aurel32
            memcpy(p, buf, l);
1893 72fb7daa aurel32
            unlock_user(p, addr, l);
1894 13eb76e0 bellard
        } else {
1895 13eb76e0 bellard
            if (!(flags & PAGE_READ))
1896 a68fe89c Paul Brook
                return -1;
1897 579a97f7 bellard
            /* XXX: this code should not depend on lock_user */
1898 72fb7daa aurel32
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
1899 a68fe89c Paul Brook
                return -1;
1900 72fb7daa aurel32
            memcpy(buf, p, l);
1901 5b257578 aurel32
            unlock_user(p, addr, 0);
1902 13eb76e0 bellard
        }
1903 13eb76e0 bellard
        len -= l;
1904 13eb76e0 bellard
        buf += l;
1905 13eb76e0 bellard
        addr += l;
1906 13eb76e0 bellard
    }
1907 a68fe89c Paul Brook
    return 0;
1908 13eb76e0 bellard
}
1909 8df1cd07 bellard
1910 13eb76e0 bellard
#else
1911 51d7a9eb Anthony PERARD
1912 a8170e5e Avi Kivity
static void invalidate_and_set_dirty(hwaddr addr,
1913 a8170e5e Avi Kivity
                                     hwaddr length)
1914 51d7a9eb Anthony PERARD
{
1915 a2cd8c85 Juan Quintela
    if (cpu_physical_memory_is_clean(addr)) {
1916 51d7a9eb Anthony PERARD
        /* invalidate code */
1917 51d7a9eb Anthony PERARD
        tb_invalidate_phys_page_range(addr, addr + length, 0);
1918 51d7a9eb Anthony PERARD
        /* set dirty bit */
1919 52159192 Juan Quintela
        cpu_physical_memory_set_dirty_flag(addr, DIRTY_MEMORY_VGA);
1920 52159192 Juan Quintela
        cpu_physical_memory_set_dirty_flag(addr, DIRTY_MEMORY_MIGRATION);
1921 51d7a9eb Anthony PERARD
    }
1922 e226939d Anthony PERARD
    xen_modified_memory(addr, length);
1923 51d7a9eb Anthony PERARD
}
1924 51d7a9eb Anthony PERARD
1925 2bbfa05d Paolo Bonzini
static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
1926 2bbfa05d Paolo Bonzini
{
1927 2bbfa05d Paolo Bonzini
    if (memory_region_is_ram(mr)) {
1928 2bbfa05d Paolo Bonzini
        return !(is_write && mr->readonly);
1929 2bbfa05d Paolo Bonzini
    }
1930 2bbfa05d Paolo Bonzini
    if (memory_region_is_romd(mr)) {
1931 2bbfa05d Paolo Bonzini
        return !is_write;
1932 2bbfa05d Paolo Bonzini
    }
1933 2bbfa05d Paolo Bonzini
1934 2bbfa05d Paolo Bonzini
    return false;
1935 2bbfa05d Paolo Bonzini
}
1936 2bbfa05d Paolo Bonzini
1937 23326164 Richard Henderson
static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
1938 82f2563f Paolo Bonzini
{
1939 e1622f4b Paolo Bonzini
    unsigned access_size_max = mr->ops->valid.max_access_size;
1940 23326164 Richard Henderson
1941 23326164 Richard Henderson
    /* Regions are assumed to support 1-4 byte accesses unless
1942 23326164 Richard Henderson
       otherwise specified.  */
1943 23326164 Richard Henderson
    if (access_size_max == 0) {
1944 23326164 Richard Henderson
        access_size_max = 4;
1945 23326164 Richard Henderson
    }
1946 23326164 Richard Henderson
1947 23326164 Richard Henderson
    /* Bound the maximum access by the alignment of the address.  */
1948 23326164 Richard Henderson
    if (!mr->ops->impl.unaligned) {
1949 23326164 Richard Henderson
        unsigned align_size_max = addr & -addr;
1950 23326164 Richard Henderson
        if (align_size_max != 0 && align_size_max < access_size_max) {
1951 23326164 Richard Henderson
            access_size_max = align_size_max;
1952 23326164 Richard Henderson
        }
1953 82f2563f Paolo Bonzini
    }
1954 23326164 Richard Henderson
1955 23326164 Richard Henderson
    /* Don't attempt accesses larger than the maximum.  */
1956 23326164 Richard Henderson
    if (l > access_size_max) {
1957 23326164 Richard Henderson
        l = access_size_max;
1958 82f2563f Paolo Bonzini
    }
1959 098178f2 Paolo Bonzini
    if (l & (l - 1)) {
1960 098178f2 Paolo Bonzini
        l = 1 << (qemu_fls(l) - 1);
1961 098178f2 Paolo Bonzini
    }
1962 23326164 Richard Henderson
1963 23326164 Richard Henderson
    return l;
1964 82f2563f Paolo Bonzini
}
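/*
 * Worked example, illustration only: for a region with
 * valid.max_access_size == 4 and no unaligned support, an 8-byte
 * access at addr 0x1006 is split up: addr & -addr == 2, so the
 * alignment bound lowers access_size_max to 2 and l becomes 2 (already
 * a power of two).  The caller's loop then issues the remaining bytes.
 */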
1965 82f2563f Paolo Bonzini
1966 fd8aaa76 Paolo Bonzini
bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
1967 ac1970fb Avi Kivity
                      int len, bool is_write)
1968 13eb76e0 bellard
{
1969 149f54b5 Paolo Bonzini
    hwaddr l;
1970 13eb76e0 bellard
    uint8_t *ptr;
1971 791af8c8 Paolo Bonzini
    uint64_t val;
1972 149f54b5 Paolo Bonzini
    hwaddr addr1;
1973 5c8a00ce Paolo Bonzini
    MemoryRegion *mr;
1974 fd8aaa76 Paolo Bonzini
    bool error = false;
1975 3b46e624 ths
1976 13eb76e0 bellard
    while (len > 0) {
1977 149f54b5 Paolo Bonzini
        l = len;
1978 5c8a00ce Paolo Bonzini
        mr = address_space_translate(as, addr, &addr1, &l, is_write);
1979 3b46e624 ths
1980 13eb76e0 bellard
        if (is_write) {
1981 5c8a00ce Paolo Bonzini
            if (!memory_access_is_direct(mr, is_write)) {
1982 5c8a00ce Paolo Bonzini
                l = memory_access_size(mr, l, addr1);
1983 4917cf44 Andreas Färber
                /* XXX: could force current_cpu to NULL to avoid
1984 6a00d601 bellard
                   potential bugs */
1985 23326164 Richard Henderson
                switch (l) {
1986 23326164 Richard Henderson
                case 8:
1987 23326164 Richard Henderson
                    /* 64 bit write access */
1988 23326164 Richard Henderson
                    val = ldq_p(buf);
1989 23326164 Richard Henderson
                    error |= io_mem_write(mr, addr1, val, 8);
1990 23326164 Richard Henderson
                    break;
1991 23326164 Richard Henderson
                case 4:
1992 1c213d19 bellard
                    /* 32 bit write access */
1993 c27004ec bellard
                    val = ldl_p(buf);
1994 5c8a00ce Paolo Bonzini
                    error |= io_mem_write(mr, addr1, val, 4);
1995 23326164 Richard Henderson
                    break;
1996 23326164 Richard Henderson
                case 2:
1997 1c213d19 bellard
                    /* 16 bit write access */
1998 c27004ec bellard
                    val = lduw_p(buf);
1999 5c8a00ce Paolo Bonzini
                    error |= io_mem_write(mr, addr1, val, 2);
2000 23326164 Richard Henderson
                    break;
2001 23326164 Richard Henderson
                case 1:
2002 1c213d19 bellard
                    /* 8 bit write access */
2003 c27004ec bellard
                    val = ldub_p(buf);
2004 5c8a00ce Paolo Bonzini
                    error |= io_mem_write(mr, addr1, val, 1);
2005 23326164 Richard Henderson
                    break;
2006 23326164 Richard Henderson
                default:
2007 23326164 Richard Henderson
                    abort();
2008 13eb76e0 bellard
                }
2009 2bbfa05d Paolo Bonzini
            } else {
2010 5c8a00ce Paolo Bonzini
                addr1 += memory_region_get_ram_addr(mr);
2011 13eb76e0 bellard
                /* RAM case */
2012 5579c7f3 pbrook
                ptr = qemu_get_ram_ptr(addr1);
2013 13eb76e0 bellard
                memcpy(ptr, buf, l);
2014 51d7a9eb Anthony PERARD
                invalidate_and_set_dirty(addr1, l);
2015 13eb76e0 bellard
            }
2016 13eb76e0 bellard
        } else {
2017 5c8a00ce Paolo Bonzini
            if (!memory_access_is_direct(mr, is_write)) {
2018 13eb76e0 bellard
                /* I/O case */
2019 5c8a00ce Paolo Bonzini
                l = memory_access_size(mr, l, addr1);
2020 23326164 Richard Henderson
                switch (l) {
2021 23326164 Richard Henderson
                case 8:
2022 23326164 Richard Henderson
                    /* 64 bit read access */
2023 23326164 Richard Henderson
                    error |= io_mem_read(mr, addr1, &val, 8);
2024 23326164 Richard Henderson
                    stq_p(buf, val);
2025 23326164 Richard Henderson
                    break;
2026 23326164 Richard Henderson
                case 4:
2027 13eb76e0 bellard
                    /* 32 bit read access */
2028 5c8a00ce Paolo Bonzini
                    error |= io_mem_read(mr, addr1, &val, 4);
2029 c27004ec bellard
                    stl_p(buf, val);
2030 23326164 Richard Henderson
                    break;
2031 23326164 Richard Henderson
                case 2:
2032 13eb76e0 bellard
                    /* 16 bit read access */
2033 5c8a00ce Paolo Bonzini
                    error |= io_mem_read(mr, addr1, &val, 2);
2034 c27004ec bellard
                    stw_p(buf, val);
2035 23326164 Richard Henderson
                    break;
2036 23326164 Richard Henderson
                case 1:
2037 1c213d19 bellard
                    /* 8 bit read access */
2038 5c8a00ce Paolo Bonzini
                    error |= io_mem_read(mr, addr1, &val, 1);
2039 c27004ec bellard
                    stb_p(buf, val);
2040 23326164 Richard Henderson
                    break;
2041 23326164 Richard Henderson
                default:
2042 23326164 Richard Henderson
                    abort();
2043 13eb76e0 bellard
                }
2044 13eb76e0 bellard
            } else {
2045 13eb76e0 bellard
                /* RAM case */
2046 5c8a00ce Paolo Bonzini
                ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
2047 f3705d53 Avi Kivity
                memcpy(buf, ptr, l);
2048 13eb76e0 bellard
            }
2049 13eb76e0 bellard
        }
2050 13eb76e0 bellard
        len -= l;
2051 13eb76e0 bellard
        buf += l;
2052 13eb76e0 bellard
        addr += l;
2053 13eb76e0 bellard
    }
2054 fd8aaa76 Paolo Bonzini
2055 fd8aaa76 Paolo Bonzini
    return error;
2056 13eb76e0 bellard
}
2057 8df1cd07 bellard
2058 fd8aaa76 Paolo Bonzini
bool address_space_write(AddressSpace *as, hwaddr addr,
2059 ac1970fb Avi Kivity
                         const uint8_t *buf, int len)
2060 ac1970fb Avi Kivity
{
2061 fd8aaa76 Paolo Bonzini
    return address_space_rw(as, addr, (uint8_t *)buf, len, true);
2062 ac1970fb Avi Kivity
}
2063 ac1970fb Avi Kivity
2064 fd8aaa76 Paolo Bonzini
bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
2065 ac1970fb Avi Kivity
{
2066 fd8aaa76 Paolo Bonzini
    return address_space_rw(as, addr, buf, len, false);
2067 ac1970fb Avi Kivity
}
2068 ac1970fb Avi Kivity
2069 ac1970fb Avi Kivity
2070 a8170e5e Avi Kivity
void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
2071 ac1970fb Avi Kivity
                            int len, int is_write)
2072 ac1970fb Avi Kivity
{
2073 fd8aaa76 Paolo Bonzini
    address_space_rw(&address_space_memory, addr, buf, len, is_write);
2074 ac1970fb Avi Kivity
}
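/*
 * Usage sketch, illustration only: storing a 32-bit little-endian
 * value into guest physical memory through the memory API (cpu_to_le32()
 * is QEMU's byte-swap helper):
 *
 *     uint32_t val = cpu_to_le32(0xdeadbeef);
 *     bool err = address_space_write(&address_space_memory, 0x1000,
 *                                    (const uint8_t *)&val, sizeof(val));
 *
 * A true return value means part of the access hit an unassigned or
 * erroring region.
 */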
2075 ac1970fb Avi Kivity
2076 582b55a9 Alexander Graf
enum write_rom_type {
2077 582b55a9 Alexander Graf
    WRITE_DATA,
2078 582b55a9 Alexander Graf
    FLUSH_CACHE,
2079 582b55a9 Alexander Graf
};
2080 582b55a9 Alexander Graf
2081 582b55a9 Alexander Graf
static inline void cpu_physical_memory_write_rom_internal(
2082 582b55a9 Alexander Graf
    hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
2083 d0ecd2aa bellard
{
2084 149f54b5 Paolo Bonzini
    hwaddr l;
2085 d0ecd2aa bellard
    uint8_t *ptr;
2086 149f54b5 Paolo Bonzini
    hwaddr addr1;
2087 5c8a00ce Paolo Bonzini
    MemoryRegion *mr;
2088 3b46e624 ths
2089 d0ecd2aa bellard
    while (len > 0) {
2090 149f54b5 Paolo Bonzini
        l = len;
2091 5c8a00ce Paolo Bonzini
        mr = address_space_translate(&address_space_memory,
2092 5c8a00ce Paolo Bonzini
                                     addr, &addr1, &l, true);
2093 3b46e624 ths
2094 5c8a00ce Paolo Bonzini
        if (!(memory_region_is_ram(mr) ||
2095 5c8a00ce Paolo Bonzini
              memory_region_is_romd(mr))) {
2096 d0ecd2aa bellard
            /* do nothing */
2097 d0ecd2aa bellard
        } else {
2098 5c8a00ce Paolo Bonzini
            addr1 += memory_region_get_ram_addr(mr);
2099 d0ecd2aa bellard
            /* ROM/RAM case */
2100 5579c7f3 pbrook
            ptr = qemu_get_ram_ptr(addr1);
2101 582b55a9 Alexander Graf
            switch (type) {
2102 582b55a9 Alexander Graf
            case WRITE_DATA:
2103 582b55a9 Alexander Graf
                memcpy(ptr, buf, l);
2104 582b55a9 Alexander Graf
                invalidate_and_set_dirty(addr1, l);
2105 582b55a9 Alexander Graf
                break;
2106 582b55a9 Alexander Graf
            case FLUSH_CACHE:
2107 582b55a9 Alexander Graf
                flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2108 582b55a9 Alexander Graf
                break;
2109 582b55a9 Alexander Graf
            }
2110 d0ecd2aa bellard
        }
2111 d0ecd2aa bellard
        len -= l;
2112 d0ecd2aa bellard
        buf += l;
2113 d0ecd2aa bellard
        addr += l;
2114 d0ecd2aa bellard
    }
2115 d0ecd2aa bellard
}
2116 d0ecd2aa bellard
2117 582b55a9 Alexander Graf
/* used for ROM loading : can write in RAM and ROM */
2118 582b55a9 Alexander Graf
void cpu_physical_memory_write_rom(hwaddr addr,
2119 582b55a9 Alexander Graf
                                   const uint8_t *buf, int len)
2120 582b55a9 Alexander Graf
{
2121 582b55a9 Alexander Graf
    cpu_physical_memory_write_rom_internal(addr, buf, len, WRITE_DATA);
2122 582b55a9 Alexander Graf
}
2123 582b55a9 Alexander Graf
2124 582b55a9 Alexander Graf
void cpu_flush_icache_range(hwaddr start, int len)
2125 582b55a9 Alexander Graf
{
2126 582b55a9 Alexander Graf
    /*
2127 582b55a9 Alexander Graf
     * This function should do the same thing as an icache flush that was
2128 582b55a9 Alexander Graf
     * triggered from within the guest. For TCG we are always cache coherent,
2129 582b55a9 Alexander Graf
     * so there is no need to flush anything. For KVM / Xen we need to flush
2130 582b55a9 Alexander Graf
     * the host's instruction cache at least.
2131 582b55a9 Alexander Graf
     */
2132 582b55a9 Alexander Graf
    if (tcg_enabled()) {
2133 582b55a9 Alexander Graf
        return;
2134 582b55a9 Alexander Graf
    }
2135 582b55a9 Alexander Graf
2136 582b55a9 Alexander Graf
    cpu_physical_memory_write_rom_internal(start, NULL, len, FLUSH_CACHE);
2137 582b55a9 Alexander Graf
}
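/*
 * Illustrative sketch (editor's addition, not upstream code): a loader-style
 * caller pairs the two helpers above, writing the image through the
 * ROM-capable path and then making the host icache coherent before the
 * guest executes the code.  The function name is hypothetical.
 */
static void example_load_firmware(hwaddr dest, const uint8_t *image, int size)
{
    cpu_physical_memory_write_rom(dest, image, size);
    cpu_flush_icache_range(dest, size);
}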
typedef struct {
    MemoryRegion *mr;
    void *buffer;
    hwaddr addr;
    hwaddr len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

static void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
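/*
 * Illustrative sketch (editor's addition, not upstream code): when
 * address_space_map() returns NULL because the single bounce buffer is in
 * use, a caller can queue itself here and retry once the current owner
 * unmaps.  Both functions are hypothetical.
 */
static void example_retry_map(void *opaque)
{
    /* Invoked from cpu_notify_map_clients(); redo address_space_map(). */
}

static void example_wait_for_bounce(void *my_state)
{
    cpu_register_map_client(my_state, example_retry_map);
}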
bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
{
    MemoryRegion *mr;
    hwaddr l, xlat;

    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write)) {
                return false;
            }
        }

        len -= l;
        addr += l;
    }
    return true;
}
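/*
 * Illustrative sketch (editor's addition, not upstream code): probing a DMA
 * window up front, so a device model can reject a descriptor instead of
 * failing halfway through an address_space_rw().  The name is hypothetical.
 */
static bool example_dma_window_ok(hwaddr base, int len)
{
    return address_space_access_valid(&address_space_memory, base, len, true);
}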
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr done = 0;
    hwaddr l, xlat, base;
    MemoryRegion *mr, *this_mr;
    ram_addr_t raddr;

    if (len == 0) {
        return NULL;
    }

    l = len;
    mr = address_space_translate(as, addr, &xlat, &l, is_write);
    if (!memory_access_is_direct(mr, is_write)) {
        if (bounce.buffer) {
            return NULL;
        }
        /* Avoid unbounded allocations */
        l = MIN(l, TARGET_PAGE_SIZE);
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
        bounce.addr = addr;
        bounce.len = l;

        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            address_space_read(as, addr, bounce.buffer, l);
        }

        *plen = l;
        return bounce.buffer;
    }

    base = xlat;
    raddr = memory_region_get_ram_addr(mr);

    for (;;) {
        len -= l;
        addr += l;
        done += l;
        if (len == 0) {
            break;
        }

        l = len;
        this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (this_mr != mr || xlat != base + done) {
            break;
        }
    }

    memory_region_ref(mr);
    *plen = done;
    return qemu_ram_ptr_length(raddr + base, plen);
}

/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = qemu_ram_addr_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len) {
                    l = access_len;
                }
                invalidate_and_set_dirty(addr1, l);
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    cpu_notify_map_clients();
}

void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}
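/*
 * Illustrative sketch (editor's addition, not upstream code): the zero-copy
 * pattern the map/unmap pair is meant for.  *plen may come back smaller than
 * requested, and the call may return NULL when only the bounce buffer is
 * left, so real callers loop or fall back to cpu_physical_memory_rw().
 */
static void example_dma_to_guest(hwaddr addr, const uint8_t *data, hwaddr size)
{
    hwaddr plen = size;
    void *host = cpu_physical_memory_map(addr, &plen, 1);

    if (host) {
        memcpy(host, data, plen);
        /* access_len == plen: we wrote everything that was mapped. */
        cpu_physical_memory_unmap(host, plen, 1, plen);
    }
}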
/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
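/*
 * Illustrative sketch (editor's addition, not upstream code): choosing an
 * accessor.  A 32-bit field documented as little-endian should be read with
 * the _le_ variant on every target; plain ldl_phys() applies the target's
 * native byte order.  The register address is hypothetical.
 */
static uint32_t example_read_le_register(hwaddr reg_addr)
{
    return ldl_le_phys(reg_addr);
}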
/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 false);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 8);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
2444 aab33094 bellard
/* XXX: optimize */
2445 a8170e5e Avi Kivity
uint32_t ldub_phys(hwaddr addr)
2446 aab33094 bellard
{
2447 aab33094 bellard
    uint8_t val;
2448 aab33094 bellard
    cpu_physical_memory_read(addr, &val, 1);
2449 aab33094 bellard
    return val;
2450 aab33094 bellard
}
2451 aab33094 bellard
2452 733f0b02 Michael S. Tsirkin
/* warning: addr must be aligned */
2453 a8170e5e Avi Kivity
static inline uint32_t lduw_phys_internal(hwaddr addr,
2454 1e78bcc1 Alexander Graf
                                          enum device_endian endian)
2455 aab33094 bellard
{
2456 733f0b02 Michael S. Tsirkin
    uint8_t *ptr;
2457 733f0b02 Michael S. Tsirkin
    uint64_t val;
2458 5c8a00ce Paolo Bonzini
    MemoryRegion *mr;
2459 149f54b5 Paolo Bonzini
    hwaddr l = 2;
2460 149f54b5 Paolo Bonzini
    hwaddr addr1;
2461 733f0b02 Michael S. Tsirkin
2462 5c8a00ce Paolo Bonzini
    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2463 5c8a00ce Paolo Bonzini
                                 false);
2464 5c8a00ce Paolo Bonzini
    if (l < 2 || !memory_access_is_direct(mr, false)) {
2465 733f0b02 Michael S. Tsirkin
        /* I/O case */
2466 5c8a00ce Paolo Bonzini
        io_mem_read(mr, addr1, &val, 2);
2467 1e78bcc1 Alexander Graf
#if defined(TARGET_WORDS_BIGENDIAN)
2468 1e78bcc1 Alexander Graf
        if (endian == DEVICE_LITTLE_ENDIAN) {
2469 1e78bcc1 Alexander Graf
            val = bswap16(val);
2470 1e78bcc1 Alexander Graf
        }
2471 1e78bcc1 Alexander Graf
#else
2472 1e78bcc1 Alexander Graf
        if (endian == DEVICE_BIG_ENDIAN) {
2473 1e78bcc1 Alexander Graf
            val = bswap16(val);
2474 1e78bcc1 Alexander Graf
        }
2475 1e78bcc1 Alexander Graf
#endif
2476 733f0b02 Michael S. Tsirkin
    } else {
2477 733f0b02 Michael S. Tsirkin
        /* RAM case */
2478 5c8a00ce Paolo Bonzini
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
2479 06ef3525 Avi Kivity
                                & TARGET_PAGE_MASK)
2480 149f54b5 Paolo Bonzini
                               + addr1);
2481 1e78bcc1 Alexander Graf
        switch (endian) {
2482 1e78bcc1 Alexander Graf
        case DEVICE_LITTLE_ENDIAN:
2483 1e78bcc1 Alexander Graf
            val = lduw_le_p(ptr);
2484 1e78bcc1 Alexander Graf
            break;
2485 1e78bcc1 Alexander Graf
        case DEVICE_BIG_ENDIAN:
2486 1e78bcc1 Alexander Graf
            val = lduw_be_p(ptr);
2487 1e78bcc1 Alexander Graf
            break;
2488 1e78bcc1 Alexander Graf
        default:
2489 1e78bcc1 Alexander Graf
            val = lduw_p(ptr);
2490 1e78bcc1 Alexander Graf
            break;
2491 1e78bcc1 Alexander Graf
        }
2492 733f0b02 Michael S. Tsirkin
    }
2493 733f0b02 Michael S. Tsirkin
    return val;
2494 aab33094 bellard
}
2495 aab33094 bellard
2496 a8170e5e Avi Kivity
uint32_t lduw_phys(hwaddr addr)
2497 1e78bcc1 Alexander Graf
{
2498 1e78bcc1 Alexander Graf
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2499 1e78bcc1 Alexander Graf
}
2500 1e78bcc1 Alexander Graf
2501 a8170e5e Avi Kivity
uint32_t lduw_le_phys(hwaddr addr)
2502 1e78bcc1 Alexander Graf
{
2503 1e78bcc1 Alexander Graf
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2504 1e78bcc1 Alexander Graf
}
2505 1e78bcc1 Alexander Graf
2506 a8170e5e Avi Kivity
uint32_t lduw_be_phys(hwaddr addr)
2507 1e78bcc1 Alexander Graf
{
2508 1e78bcc1 Alexander Graf
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
2509 1e78bcc1 Alexander Graf
}
2510 1e78bcc1 Alexander Graf
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(hwaddr addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        io_mem_write(mr, addr1, val, 4);
    } else {
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (cpu_physical_memory_is_clean(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flag(addr1,
                                                   DIRTY_MEMORY_MIGRATION);
                cpu_physical_memory_set_dirty_flag(addr1, DIRTY_MEMORY_VGA);
            }
        }
    }
}
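/*
 * Illustrative sketch (editor's addition, not upstream code): the MMU
 * emulation pattern the comment above describes, updating a bit in a guest
 * page-table entry without marking the page dirty or invalidating TBs.
 * The PTE layout and the ACCESSED bit value are hypothetical.
 */
static void example_set_pte_accessed(hwaddr pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    stl_phys_notdirty(pte_addr, pte | 0x20 /* hypothetical ACCESSED bit */);
}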
/* warning: addr must be aligned */
static inline void stl_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(mr, addr1, val, 4);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 4);
    }
}

void stl_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}
2599 aab33094 bellard
/* XXX: optimize */
2600 a8170e5e Avi Kivity
void stb_phys(hwaddr addr, uint32_t val)
2601 aab33094 bellard
{
2602 aab33094 bellard
    uint8_t v = val;
2603 aab33094 bellard
    cpu_physical_memory_write(addr, &v, 1);
2604 aab33094 bellard
}
2605 aab33094 bellard
2606 733f0b02 Michael S. Tsirkin
/* warning: addr must be aligned */
2607 a8170e5e Avi Kivity
static inline void stw_phys_internal(hwaddr addr, uint32_t val,
2608 1e78bcc1 Alexander Graf
                                     enum device_endian endian)
2609 aab33094 bellard
{
2610 733f0b02 Michael S. Tsirkin
    uint8_t *ptr;
2611 5c8a00ce Paolo Bonzini
    MemoryRegion *mr;
2612 149f54b5 Paolo Bonzini
    hwaddr l = 2;
2613 149f54b5 Paolo Bonzini
    hwaddr addr1;
2614 733f0b02 Michael S. Tsirkin
2615 5c8a00ce Paolo Bonzini
    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2616 5c8a00ce Paolo Bonzini
                                 true);
2617 5c8a00ce Paolo Bonzini
    if (l < 2 || !memory_access_is_direct(mr, true)) {
2618 1e78bcc1 Alexander Graf
#if defined(TARGET_WORDS_BIGENDIAN)
2619 1e78bcc1 Alexander Graf
        if (endian == DEVICE_LITTLE_ENDIAN) {
2620 1e78bcc1 Alexander Graf
            val = bswap16(val);
2621 1e78bcc1 Alexander Graf
        }
2622 1e78bcc1 Alexander Graf
#else
2623 1e78bcc1 Alexander Graf
        if (endian == DEVICE_BIG_ENDIAN) {
2624 1e78bcc1 Alexander Graf
            val = bswap16(val);
2625 1e78bcc1 Alexander Graf
        }
2626 1e78bcc1 Alexander Graf
#endif
2627 5c8a00ce Paolo Bonzini
        io_mem_write(mr, addr1, val, 2);
2628 733f0b02 Michael S. Tsirkin
    } else {
2629 733f0b02 Michael S. Tsirkin
        /* RAM case */
2630 5c8a00ce Paolo Bonzini
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
2631 733f0b02 Michael S. Tsirkin
        ptr = qemu_get_ram_ptr(addr1);
2632 1e78bcc1 Alexander Graf
        switch (endian) {
2633 1e78bcc1 Alexander Graf
        case DEVICE_LITTLE_ENDIAN:
2634 1e78bcc1 Alexander Graf
            stw_le_p(ptr, val);
2635 1e78bcc1 Alexander Graf
            break;
2636 1e78bcc1 Alexander Graf
        case DEVICE_BIG_ENDIAN:
2637 1e78bcc1 Alexander Graf
            stw_be_p(ptr, val);
2638 1e78bcc1 Alexander Graf
            break;
2639 1e78bcc1 Alexander Graf
        default:
2640 1e78bcc1 Alexander Graf
            stw_p(ptr, val);
2641 1e78bcc1 Alexander Graf
            break;
2642 1e78bcc1 Alexander Graf
        }
2643 51d7a9eb Anthony PERARD
        invalidate_and_set_dirty(addr1, 2);
2644 733f0b02 Michael S. Tsirkin
    }
2645 aab33094 bellard
}
2646 aab33094 bellard
2647 a8170e5e Avi Kivity
void stw_phys(hwaddr addr, uint32_t val)
2648 1e78bcc1 Alexander Graf
{
2649 1e78bcc1 Alexander Graf
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2650 1e78bcc1 Alexander Graf
}
2651 1e78bcc1 Alexander Graf
2652 a8170e5e Avi Kivity
void stw_le_phys(hwaddr addr, uint32_t val)
2653 1e78bcc1 Alexander Graf
{
2654 1e78bcc1 Alexander Graf
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2655 1e78bcc1 Alexander Graf
}
2656 1e78bcc1 Alexander Graf
2657 a8170e5e Avi Kivity
void stw_be_phys(hwaddr addr, uint32_t val)
2658 1e78bcc1 Alexander Graf
{
2659 1e78bcc1 Alexander Graf
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2660 1e78bcc1 Alexander Graf
}
2661 1e78bcc1 Alexander Graf
2662 aab33094 bellard
/* XXX: optimize */
2663 a8170e5e Avi Kivity
void stq_phys(hwaddr addr, uint64_t val)
2664 aab33094 bellard
{
2665 aab33094 bellard
    val = tswap64(val);
2666 71d2b725 Stefan Weil
    cpu_physical_memory_write(addr, &val, 8);
2667 aab33094 bellard
}
2668 aab33094 bellard
2669 a8170e5e Avi Kivity
void stq_le_phys(hwaddr addr, uint64_t val)
2670 1e78bcc1 Alexander Graf
{
2671 1e78bcc1 Alexander Graf
    val = cpu_to_le64(val);
2672 1e78bcc1 Alexander Graf
    cpu_physical_memory_write(addr, &val, 8);
2673 1e78bcc1 Alexander Graf
}
2674 1e78bcc1 Alexander Graf
2675 a8170e5e Avi Kivity
void stq_be_phys(hwaddr addr, uint64_t val)
2676 1e78bcc1 Alexander Graf
{
2677 1e78bcc1 Alexander Graf
    val = cpu_to_be64(val);
2678 1e78bcc1 Alexander Graf
    cpu_physical_memory_write(addr, &val, 8);
2679 1e78bcc1 Alexander Graf
}
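/*
 * Illustrative sketch (editor's addition, not upstream code): 64-bit fields
 * such as a ring-base pointer in a little-endian descriptor layout go
 * through the stq_le_phys()/ldq_le_phys() pair.  Names are hypothetical.
 */
static void example_write_ring_base(hwaddr desc_addr, uint64_t ring_base)
{
    stq_le_phys(desc_addr, ring_base);
}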
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(cpu, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1) {
            return -1;
        }
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        } else {
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
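/*
 * Illustrative sketch (editor's addition, not upstream code): the shape of a
 * gdbstub-style peek.  It takes guest-virtual addresses, walks page by page,
 * and can patch ROM when writing breakpoints.  The helper name is
 * hypothetical.
 */
static int example_peek_guest_u32(CPUState *cpu, target_ulong va,
                                  uint32_t *out)
{
    return cpu_memory_rw_debug(cpu, va, (uint8_t *)out, sizeof(*out), 0);
}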
#endif

#if !defined(CONFIG_USER_ONLY)

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#endif
#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;

    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    return !(memory_region_is_ram(mr) ||
             memory_region_is_romd(mr));
}

void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        func(block->host, block->offset, block->length, opaque);
    }
}
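/*
 * Illustrative sketch (editor's addition, not upstream code): a callback in
 * the style of the migration code this iterator serves, summing the length
 * of every RAM block into *opaque.  Both functions are hypothetical.
 */
static void example_sum_ram(void *host_addr, ram_addr_t offset,
                            ram_addr_t length, void *opaque)
{
    *(ram_addr_t *)opaque += length;
}

static ram_addr_t example_total_ram(void)
{
    ram_addr_t total = 0;

    qemu_ram_foreach_block(example_sum_ram, &total);
    return total;
}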
#endif