
root / exec.c @ e1e84ba0


1
/*
2
 *  Virtual page mapping
3
 *
4
 *  Copyright (c) 2003 Fabrice Bellard
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18
 */
19
#include "config.h"
20
#ifdef _WIN32
21
#include <windows.h>
22
#else
23
#include <sys/types.h>
24
#include <sys/mman.h>
25
#endif
26

    
27
#include "qemu-common.h"
28
#include "cpu.h"
29
#include "tcg.h"
30
#include "hw/hw.h"
31
#include "hw/qdev.h"
32
#include "qemu/osdep.h"
33
#include "sysemu/kvm.h"
34
#include "sysemu/sysemu.h"
35
#include "hw/xen/xen.h"
36
#include "qemu/timer.h"
37
#include "qemu/config-file.h"
38
#include "exec/memory.h"
39
#include "sysemu/dma.h"
40
#include "exec/address-spaces.h"
41
#if defined(CONFIG_USER_ONLY)
42
#include <qemu.h>
43
#else /* !CONFIG_USER_ONLY */
44
#include "sysemu/xen-mapcache.h"
45
#include "trace.h"
46
#endif
47
#include "exec/cpu-all.h"
48

    
49
#include "exec/cputlb.h"
50
#include "translate-all.h"
51

    
52
#include "exec/memory-internal.h"
53

    
54
//#define DEBUG_SUBPAGE
55

    
56
#if !defined(CONFIG_USER_ONLY)
57
static int in_migration;
58

    
59
RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };
60

    
61
static MemoryRegion *system_memory;
62
static MemoryRegion *system_io;
63

    
64
AddressSpace address_space_io;
65
AddressSpace address_space_memory;
66

    
67
MemoryRegion io_mem_rom, io_mem_notdirty;
68
static MemoryRegion io_mem_unassigned;
69

    
70
#endif
71

    
72
struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
73
/* current CPU in the current thread. It is only valid inside
74
   cpu_exec() */
75
DEFINE_TLS(CPUState *, current_cpu);
76
/* 0 = Do not count executed instructions.
77
   1 = Precise instruction counting.
78
   2 = Adaptive rate instruction counting.  */
79
int use_icount;
80

    
81
#if !defined(CONFIG_USER_ONLY)
82

    
83
typedef struct PhysPageEntry PhysPageEntry;
84

    
85
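/* One entry of the physical-page radix tree.  A leaf's ptr indexes
 * phys_sections; a non-leaf's ptr indexes phys_map_nodes, i.e. the next
 * L2_SIZE-entry node to descend into.  Packing the tag and the 15-bit
 * index into 16 bits keeps the tree compact; PHYS_MAP_NODE_NIL marks a
 * subtree that has not been allocated yet.
 */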
struct PhysPageEntry {
86
    uint16_t is_leaf : 1;
87
     /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
88
    uint16_t ptr : 15;
89
};
90

    
91
typedef PhysPageEntry Node[L2_SIZE];
92

    
93
struct AddressSpaceDispatch {
94
    /* This is a multi-level map on the physical address space.
95
     * The bottom level has pointers to MemoryRegionSections.
96
     */
97
    PhysPageEntry phys_map;
98
    Node *nodes;
99
    MemoryRegionSection *sections;
100
    AddressSpace *as;
101
};
102

    
103
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
104
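/* A subpage splits a single target page into byte-granular ranges that
 * can belong to different MemoryRegionSections.  sub_section[] maps
 * each offset within the page (SUBPAGE_IDX) to a section index, and
 * accesses are forwarded to the owning AddressSpace at base + offset.
 */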
typedef struct subpage_t {
105
    MemoryRegion iomem;
106
    AddressSpace *as;
107
    hwaddr base;
108
    uint16_t sub_section[TARGET_PAGE_SIZE];
109
} subpage_t;
110

    
111
#define PHYS_SECTION_UNASSIGNED 0
112
#define PHYS_SECTION_NOTDIRTY 1
113
#define PHYS_SECTION_ROM 2
114
#define PHYS_SECTION_WATCH 3
115

    
116
typedef struct PhysPageMap {
117
    unsigned sections_nb;
118
    unsigned sections_nb_alloc;
119
    unsigned nodes_nb;
120
    unsigned nodes_nb_alloc;
121
    Node *nodes;
122
    MemoryRegionSection *sections;
123
} PhysPageMap;
124

    
125
static PhysPageMap *prev_map;
126
static PhysPageMap next_map;
127

    
128
#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)
129

    
130
static void io_mem_init(void);
131
static void memory_map_init(void);
132
static void *qemu_safe_ram_ptr(ram_addr_t addr);
133

    
134
static MemoryRegion io_mem_watch;
135
#endif
136

    
137
#if !defined(CONFIG_USER_ONLY)
138

    
139
static void phys_map_node_reserve(unsigned nodes)
140
{
141
    if (next_map.nodes_nb + nodes > next_map.nodes_nb_alloc) {
142
        next_map.nodes_nb_alloc = MAX(next_map.nodes_nb_alloc * 2,
143
                                            16);
144
        next_map.nodes_nb_alloc = MAX(next_map.nodes_nb_alloc,
145
                                      next_map.nodes_nb + nodes);
146
        next_map.nodes = g_renew(Node, next_map.nodes,
147
                                 next_map.nodes_nb_alloc);
148
    }
149
}
150

    
151
static uint16_t phys_map_node_alloc(void)
152
{
153
    unsigned i;
154
    uint16_t ret;
155

    
156
    ret = next_map.nodes_nb++;
157
    assert(ret != PHYS_MAP_NODE_NIL);
158
    assert(ret != next_map.nodes_nb_alloc);
159
    for (i = 0; i < L2_SIZE; ++i) {
160
        next_map.nodes[ret][i].is_leaf = 0;
161
        next_map.nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
162
    }
163
    return ret;
164
}
165

    
166
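/* Recursively populate the radix tree so that the page range
 * [*index, *index + *nb) maps to section `leaf`.  At each level, a
 * chunk that is step-aligned and at least a full step long is recorded
 * as a leaf directly; anything smaller recurses one level down.
 * Missing intermediate nodes are allocated on demand and, at level 0,
 * pre-filled with unassigned leaves.
 */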
static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
167
                                hwaddr *nb, uint16_t leaf,
168
                                int level)
169
{
170
    PhysPageEntry *p;
171
    int i;
172
    hwaddr step = (hwaddr)1 << (level * L2_BITS);
173

    
174
    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
175
        lp->ptr = phys_map_node_alloc();
176
        p = next_map.nodes[lp->ptr];
177
        if (level == 0) {
178
            for (i = 0; i < L2_SIZE; i++) {
179
                p[i].is_leaf = 1;
180
                p[i].ptr = PHYS_SECTION_UNASSIGNED;
181
            }
182
        }
183
    } else {
184
        p = next_map.nodes[lp->ptr];
185
    }
186
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];
187

    
188
    while (*nb && lp < &p[L2_SIZE]) {
189
        if ((*index & (step - 1)) == 0 && *nb >= step) {
190
            lp->is_leaf = true;
191
            lp->ptr = leaf;
192
            *index += step;
193
            *nb -= step;
194
        } else {
195
            phys_page_set_level(lp, index, nb, leaf, level - 1);
196
        }
197
        ++lp;
198
    }
199
}
200

    
201
static void phys_page_set(AddressSpaceDispatch *d,
202
                          hwaddr index, hwaddr nb,
203
                          uint16_t leaf)
204
{
205
    /* Wildly overreserve - it doesn't matter much. */
206
    phys_map_node_reserve(3 * P_L2_LEVELS);
207

    
208
    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
209
}
210

    
211
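/* Walk the radix tree from `lp` down to the leaf covering page number
 * `index`.  Returns the matching MemoryRegionSection, or the unassigned
 * section if the walk reaches a subtree that was never allocated.
 */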
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr index,
212
                                           Node *nodes, MemoryRegionSection *sections)
213
{
214
    PhysPageEntry *p;
215
    int i;
216

    
217
    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
218
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
219
            return &sections[PHYS_SECTION_UNASSIGNED];
220
        }
221
        p = nodes[lp.ptr];
222
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
223
    }
224
    return &sections[lp.ptr];
225
}
226

    
227
bool memory_region_is_unassigned(MemoryRegion *mr)
228
{
229
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
230
        && mr != &io_mem_watch;
231
}
232

    
233
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
234
                                                        hwaddr addr,
235
                                                        bool resolve_subpage)
236
{
237
    MemoryRegionSection *section;
238
    subpage_t *subpage;
239

    
240
    section = phys_page_find(d->phys_map, addr >> TARGET_PAGE_BITS,
241
                             d->nodes, d->sections);
242
    if (resolve_subpage && section->mr->subpage) {
243
        subpage = container_of(section->mr, subpage_t, iomem);
244
        section = &d->sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
245
    }
246
    return section;
247
}
248

    
249
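/* Look up the MemoryRegionSection covering `addr` (resolving subpage
 * containers when resolve_subpage is set) and turn the address into an
 * offset within the section's MemoryRegion (*xlat).  *plen is clamped
 * so that the range does not extend past the end of the region.
 */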
static MemoryRegionSection *
250
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
251
                                 hwaddr *plen, bool resolve_subpage)
252
{
253
    MemoryRegionSection *section;
254
    Int128 diff;
255

    
256
    section = address_space_lookup_region(d, addr, resolve_subpage);
257
    /* Compute offset within MemoryRegionSection */
258
    addr -= section->offset_within_address_space;
259

    
260
    /* Compute offset within MemoryRegion */
261
    *xlat = addr + section->offset_within_region;
262

    
263
    diff = int128_sub(section->mr->size, int128_make64(addr));
264
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
265
    return section;
266
}
267

    
268
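/* Translate a guest physical address in `as` into a (MemoryRegion,
 * offset) pair, iterating through any IOMMUs along the way.  *plen is
 * reduced to what fits inside the final region and each IOMMU mapping;
 * if an IOMMU denies the access, io_mem_unassigned is returned.
 */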
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
269
                                      hwaddr *xlat, hwaddr *plen,
270
                                      bool is_write)
271
{
272
    IOMMUTLBEntry iotlb;
273
    MemoryRegionSection *section;
274
    MemoryRegion *mr;
275
    hwaddr len = *plen;
276

    
277
    for (;;) {
278
        section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
279
        mr = section->mr;
280

    
281
        if (!mr->iommu_ops) {
282
            break;
283
        }
284

    
285
        iotlb = mr->iommu_ops->translate(mr, addr);
286
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
287
                | (addr & iotlb.addr_mask));
288
        len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
289
        if (!(iotlb.perm & (1 << is_write))) {
290
            mr = &io_mem_unassigned;
291
            break;
292
        }
293

    
294
        as = iotlb.target_as;
295
    }
296

    
297
    *plen = len;
298
    *xlat = addr;
299
    return mr;
300
}
301

    
302
MemoryRegionSection *
303
address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
304
                                  hwaddr *plen)
305
{
306
    MemoryRegionSection *section;
307
    section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);
308

    
309
    assert(!section->mr->iommu_ops);
310
    return section;
311
}
312
#endif
313

    
314
void cpu_exec_init_all(void)
315
{
316
#if !defined(CONFIG_USER_ONLY)
317
    qemu_mutex_init(&ram_list.mutex);
318
    memory_map_init();
319
    io_mem_init();
320
#endif
321
}
322

    
323
#if !defined(CONFIG_USER_ONLY)
324

    
325
static int cpu_common_post_load(void *opaque, int version_id)
326
{
327
    CPUState *cpu = opaque;
328

    
329
    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
330
       version_id is increased. */
331
    cpu->interrupt_request &= ~0x01;
332
    tlb_flush(cpu->env_ptr, 1);
333

    
334
    return 0;
335
}
336

    
337
const VMStateDescription vmstate_cpu_common = {
338
    .name = "cpu_common",
339
    .version_id = 1,
340
    .minimum_version_id = 1,
341
    .minimum_version_id_old = 1,
342
    .post_load = cpu_common_post_load,
343
    .fields      = (VMStateField []) {
344
        VMSTATE_UINT32(halted, CPUState),
345
        VMSTATE_UINT32(interrupt_request, CPUState),
346
        VMSTATE_END_OF_LIST()
347
    }
348
};
349

    
350
#endif
351

    
352
CPUState *qemu_get_cpu(int index)
353
{
354
    CPUState *cpu;
355

    
356
    CPU_FOREACH(cpu) {
357
        if (cpu->cpu_index == index) {
358
            return cpu;
359
        }
360
    }
361

    
362
    return NULL;
363
}
364

    
365
void cpu_exec_init(CPUArchState *env)
366
{
367
    CPUState *cpu = ENV_GET_CPU(env);
368
    CPUClass *cc = CPU_GET_CLASS(cpu);
369
    CPUState *some_cpu;
370
    int cpu_index;
371

    
372
#if defined(CONFIG_USER_ONLY)
373
    cpu_list_lock();
374
#endif
375
    cpu_index = 0;
376
    CPU_FOREACH(some_cpu) {
377
        cpu_index++;
378
    }
379
    cpu->cpu_index = cpu_index;
380
    cpu->numa_node = 0;
381
    QTAILQ_INIT(&env->breakpoints);
382
    QTAILQ_INIT(&env->watchpoints);
383
#ifndef CONFIG_USER_ONLY
384
    cpu->thread_id = qemu_get_thread_id();
385
#endif
386
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
387
#if defined(CONFIG_USER_ONLY)
388
    cpu_list_unlock();
389
#endif
390
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
391
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
392
    }
393
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
394
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
395
                    cpu_save, cpu_load, env);
396
    assert(cc->vmsd == NULL);
397
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
398
#endif
399
    if (cc->vmsd != NULL) {
400
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
401
    }
402
}
403

    
404
#if defined(TARGET_HAS_ICE)
405
#if defined(CONFIG_USER_ONLY)
406
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
407
{
408
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
409
}
410
#else
411
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
412
{
413
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(cpu, pc) |
414
            (pc & ~TARGET_PAGE_MASK));
415
}
416
#endif
417
#endif /* TARGET_HAS_ICE */
418

    
419
#if defined(CONFIG_USER_ONLY)
420
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
421

    
422
{
423
}
424

    
425
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
426
                          int flags, CPUWatchpoint **watchpoint)
427
{
428
    return -ENOSYS;
429
}
430
#else
431
/* Add a watchpoint.  */
432
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
433
                          int flags, CPUWatchpoint **watchpoint)
434
{
435
    target_ulong len_mask = ~(len - 1);
436
    CPUWatchpoint *wp;
437

    
438
    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
439
    if ((len & (len - 1)) || (addr & ~len_mask) ||
440
            len == 0 || len > TARGET_PAGE_SIZE) {
441
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
442
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
443
        return -EINVAL;
444
    }
445
    wp = g_malloc(sizeof(*wp));
446

    
447
    wp->vaddr = addr;
448
    wp->len_mask = len_mask;
449
    wp->flags = flags;
450

    
451
    /* keep all GDB-injected watchpoints in front */
452
    if (flags & BP_GDB)
453
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
454
    else
455
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
456

    
457
    tlb_flush_page(env, addr);
458

    
459
    if (watchpoint)
460
        *watchpoint = wp;
461
    return 0;
462
}
463

    
464
/* Remove a specific watchpoint.  */
465
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
466
                          int flags)
467
{
468
    target_ulong len_mask = ~(len - 1);
469
    CPUWatchpoint *wp;
470

    
471
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
472
        if (addr == wp->vaddr && len_mask == wp->len_mask
473
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
474
            cpu_watchpoint_remove_by_ref(env, wp);
475
            return 0;
476
        }
477
    }
478
    return -ENOENT;
479
}
480

    
481
/* Remove a specific watchpoint by reference.  */
482
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
483
{
484
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
485

    
486
    tlb_flush_page(env, watchpoint->vaddr);
487

    
488
    g_free(watchpoint);
489
}
490

    
491
/* Remove all matching watchpoints.  */
492
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
493
{
494
    CPUWatchpoint *wp, *next;
495

    
496
    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
497
        if (wp->flags & mask)
498
            cpu_watchpoint_remove_by_ref(env, wp);
499
    }
500
}
501
#endif
502

    
503
/* Add a breakpoint.  */
504
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
505
                          CPUBreakpoint **breakpoint)
506
{
507
#if defined(TARGET_HAS_ICE)
508
    CPUBreakpoint *bp;
509

    
510
    bp = g_malloc(sizeof(*bp));
511

    
512
    bp->pc = pc;
513
    bp->flags = flags;
514

    
515
    /* keep all GDB-injected breakpoints in front */
516
    if (flags & BP_GDB) {
517
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
518
    } else {
519
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
520
    }
521

    
522
    breakpoint_invalidate(ENV_GET_CPU(env), pc);
523

    
524
    if (breakpoint) {
525
        *breakpoint = bp;
526
    }
527
    return 0;
528
#else
529
    return -ENOSYS;
530
#endif
531
}
532

    
533
/* Remove a specific breakpoint.  */
534
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
535
{
536
#if defined(TARGET_HAS_ICE)
537
    CPUBreakpoint *bp;
538

    
539
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
540
        if (bp->pc == pc && bp->flags == flags) {
541
            cpu_breakpoint_remove_by_ref(env, bp);
542
            return 0;
543
        }
544
    }
545
    return -ENOENT;
546
#else
547
    return -ENOSYS;
548
#endif
549
}
550

    
551
/* Remove a specific breakpoint by reference.  */
552
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
553
{
554
#if defined(TARGET_HAS_ICE)
555
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
556

    
557
    breakpoint_invalidate(ENV_GET_CPU(env), breakpoint->pc);
558

    
559
    g_free(breakpoint);
560
#endif
561
}
562

    
563
/* Remove all matching breakpoints. */
564
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
565
{
566
#if defined(TARGET_HAS_ICE)
567
    CPUBreakpoint *bp, *next;
568

    
569
    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
570
        if (bp->flags & mask)
571
            cpu_breakpoint_remove_by_ref(env, bp);
572
    }
573
#endif
574
}
575

    
576
/* enable or disable single step mode. EXCP_DEBUG is returned by the
577
   CPU loop after each instruction */
578
void cpu_single_step(CPUState *cpu, int enabled)
579
{
580
#if defined(TARGET_HAS_ICE)
581
    if (cpu->singlestep_enabled != enabled) {
582
        cpu->singlestep_enabled = enabled;
583
        if (kvm_enabled()) {
584
            kvm_update_guest_debug(cpu, 0);
585
        } else {
586
            /* must flush all the translated code to avoid inconsistencies */
587
            /* XXX: only flush what is necessary */
588
            CPUArchState *env = cpu->env_ptr;
589
            tb_flush(env);
590
        }
591
    }
592
#endif
593
}
594

    
595
void cpu_abort(CPUArchState *env, const char *fmt, ...)
596
{
597
    CPUState *cpu = ENV_GET_CPU(env);
598
    va_list ap;
599
    va_list ap2;
600

    
601
    va_start(ap, fmt);
602
    va_copy(ap2, ap);
603
    fprintf(stderr, "qemu: fatal: ");
604
    vfprintf(stderr, fmt, ap);
605
    fprintf(stderr, "\n");
606
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
607
    if (qemu_log_enabled()) {
608
        qemu_log("qemu: fatal: ");
609
        qemu_log_vprintf(fmt, ap2);
610
        qemu_log("\n");
611
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
612
        qemu_log_flush();
613
        qemu_log_close();
614
    }
615
    va_end(ap2);
616
    va_end(ap);
617
#if defined(CONFIG_USER_ONLY)
618
    {
619
        struct sigaction act;
620
        sigfillset(&act.sa_mask);
621
        act.sa_handler = SIG_DFL;
622
        sigaction(SIGABRT, &act, NULL);
623
    }
624
#endif
625
    abort();
626
}
627

    
628
CPUArchState *cpu_copy(CPUArchState *env)
629
{
630
    CPUArchState *new_env = cpu_init(env->cpu_model_str);
631
#if defined(TARGET_HAS_ICE)
632
    CPUBreakpoint *bp;
633
    CPUWatchpoint *wp;
634
#endif
635

    
636
    /* Reset non arch specific state */
637
    cpu_reset(ENV_GET_CPU(new_env));
638

    
639
    /* Copy arch specific state into the new CPU */
640
    memcpy(new_env, env, sizeof(CPUArchState));
641

    
642
    /* Clone all break/watchpoints.
643
       Note: Once we support ptrace with hw-debug register access, make sure
644
       BP_CPU break/watchpoints are handled correctly on clone. */
645
    QTAILQ_INIT(&new_env->breakpoints);
646
    QTAILQ_INIT(&new_env->watchpoints);
647
#if defined(TARGET_HAS_ICE)
648
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
649
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
650
    }
651
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
652
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
653
                              wp->flags, NULL);
654
    }
655
#endif
656

    
657
    return new_env;
658
}
659

    
660
#if !defined(CONFIG_USER_ONLY)
661
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
662
                                      uintptr_t length)
663
{
664
    uintptr_t start1;
665

    
666
    /* we modify the TLB cache so that the dirty bit will be set again
667
       when accessing the range */
668
    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
669
    /* Check that we don't span multiple blocks - this breaks the
670
       address comparisons below.  */
671
    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
672
            != (end - 1) - start) {
673
        abort();
674
    }
675
    cpu_tlb_reset_dirty_all(start1, length);
676

    
677
}
678

    
679
/* Note: start and end must be within the same ram block.  */
680
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
681
                                     int dirty_flags)
682
{
683
    uintptr_t length;
684

    
685
    start &= TARGET_PAGE_MASK;
686
    end = TARGET_PAGE_ALIGN(end);
687

    
688
    length = end - start;
689
    if (length == 0)
690
        return;
691
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
692

    
693
    if (tcg_enabled()) {
694
        tlb_reset_dirty_range_all(start, end, length);
695
    }
696
}
697

    
698
static int cpu_physical_memory_set_dirty_tracking(int enable)
699
{
700
    int ret = 0;
701
    in_migration = enable;
702
    return ret;
703
}
704

    
705
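/* Compute the iotlb value stored in a TLB entry for `vaddr`.  For RAM
 * this is the page's ram_addr ORed with a dirty-tracking section
 * (notdirty when writable, rom when read-only); for MMIO it is the
 * section index plus the offset within the section.  If the page has a
 * matching watchpoint, the entry is pointed at the watch section and
 * flagged TLB_MMIO so that every access traps.
 */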
hwaddr memory_region_section_get_iotlb(CPUArchState *env,
706
                                       MemoryRegionSection *section,
707
                                       target_ulong vaddr,
708
                                       hwaddr paddr, hwaddr xlat,
709
                                       int prot,
710
                                       target_ulong *address)
711
{
712
    hwaddr iotlb;
713
    CPUWatchpoint *wp;
714

    
715
    if (memory_region_is_ram(section->mr)) {
716
        /* Normal RAM.  */
717
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
718
            + xlat;
719
        if (!section->readonly) {
720
            iotlb |= PHYS_SECTION_NOTDIRTY;
721
        } else {
722
            iotlb |= PHYS_SECTION_ROM;
723
        }
724
    } else {
725
        iotlb = section - address_space_memory.dispatch->sections;
726
        iotlb += xlat;
727
    }
728

    
729
    /* Make accesses to pages with watchpoints go via the
730
       watchpoint trap routines.  */
731
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
732
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
733
            /* Avoid trapping reads of pages with a write breakpoint. */
734
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
735
                iotlb = PHYS_SECTION_WATCH + paddr;
736
                *address |= TLB_MMIO;
737
                break;
738
            }
739
        }
740
    }
741

    
742
    return iotlb;
743
}
744
#endif /* !defined(CONFIG_USER_ONLY) */
745

    
746
#if !defined(CONFIG_USER_ONLY)
747

    
748
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
749
                             uint16_t section);
750
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);
751

    
752
static void *(*phys_mem_alloc)(ram_addr_t size) = qemu_anon_ram_alloc;
753

    
754
/*
755
 * Set a custom physical guest memory allocator.
756
 * Accelerators with unusual needs may need this.  Hopefully, we can
757
 * get rid of it eventually.
758
 */
759
void phys_mem_set_alloc(void *(*alloc)(ram_addr_t))
760
{
761
    phys_mem_alloc = alloc;
762
}
763

    
764
static uint16_t phys_section_add(MemoryRegionSection *section)
765
{
766
    /* The physical section number is ORed with a page-aligned
767
     * pointer to produce the iotlb entries.  Thus it should
768
     * never overflow into the page-aligned value.
769
     */
770
    assert(next_map.sections_nb < TARGET_PAGE_SIZE);
771

    
772
    if (next_map.sections_nb == next_map.sections_nb_alloc) {
773
        next_map.sections_nb_alloc = MAX(next_map.sections_nb_alloc * 2,
774
                                         16);
775
        next_map.sections = g_renew(MemoryRegionSection, next_map.sections,
776
                                    next_map.sections_nb_alloc);
777
    }
778
    next_map.sections[next_map.sections_nb] = *section;
779
    memory_region_ref(section->mr);
780
    return next_map.sections_nb++;
781
}
782

    
783
static void phys_section_destroy(MemoryRegion *mr)
784
{
785
    memory_region_unref(mr);
786

    
787
    if (mr->subpage) {
788
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
789
        memory_region_destroy(&subpage->iomem);
790
        g_free(subpage);
791
    }
792
}
793

    
794
static void phys_sections_free(PhysPageMap *map)
795
{
796
    while (map->sections_nb > 0) {
797
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
798
        phys_section_destroy(section->mr);
799
    }
800
    g_free(map->sections);
801
    g_free(map->nodes);
802
    g_free(map);
803
}
804

    
805
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
806
{
807
    subpage_t *subpage;
808
    hwaddr base = section->offset_within_address_space
809
        & TARGET_PAGE_MASK;
810
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base >> TARGET_PAGE_BITS,
811
                                                   next_map.nodes, next_map.sections);
812
    MemoryRegionSection subsection = {
813
        .offset_within_address_space = base,
814
        .size = int128_make64(TARGET_PAGE_SIZE),
815
    };
816
    hwaddr start, end;
817

    
818
    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
819

    
820
    if (!(existing->mr->subpage)) {
821
        subpage = subpage_init(d->as, base);
822
        subsection.mr = &subpage->iomem;
823
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
824
                      phys_section_add(&subsection));
825
    } else {
826
        subpage = container_of(existing->mr, subpage_t, iomem);
827
    }
828
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
829
    end = start + int128_get64(section->size) - 1;
830
    subpage_register(subpage, start, end, phys_section_add(section));
831
}
832

    
833

    
834
static void register_multipage(AddressSpaceDispatch *d,
835
                               MemoryRegionSection *section)
836
{
837
    hwaddr start_addr = section->offset_within_address_space;
838
    uint16_t section_index = phys_section_add(section);
839
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
840
                                                    TARGET_PAGE_BITS));
841

    
842
    assert(num_pages);
843
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
844
}
845

    
846
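/* region_add hook: split the incoming section into an unaligned head,
 * a page-aligned middle and an unaligned tail.  The head, the tail and
 * any piece smaller than a page are registered as subpages; the aligned
 * middle is registered as whole pages via register_multipage.
 */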
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
847
{
848
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
849
    AddressSpaceDispatch *d = as->next_dispatch;
850
    MemoryRegionSection now = *section, remain = *section;
851
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
852

    
853
    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
854
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
855
                       - now.offset_within_address_space;
856

    
857
        now.size = int128_min(int128_make64(left), now.size);
858
        register_subpage(d, &now);
859
    } else {
860
        now.size = int128_zero();
861
    }
862
    while (int128_ne(remain.size, now.size)) {
863
        remain.size = int128_sub(remain.size, now.size);
864
        remain.offset_within_address_space += int128_get64(now.size);
865
        remain.offset_within_region += int128_get64(now.size);
866
        now = remain;
867
        if (int128_lt(remain.size, page_size)) {
868
            register_subpage(d, &now);
869
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
870
            now.size = page_size;
871
            register_subpage(d, &now);
872
        } else {
873
            now.size = int128_and(now.size, int128_neg(page_size));
874
            register_multipage(d, &now);
875
        }
876
    }
877
}
878

    
879
void qemu_flush_coalesced_mmio_buffer(void)
880
{
881
    if (kvm_enabled())
882
        kvm_flush_coalesced_mmio_buffer();
883
}
884

    
885
void qemu_mutex_lock_ramlist(void)
886
{
887
    qemu_mutex_lock(&ram_list.mutex);
888
}
889

    
890
void qemu_mutex_unlock_ramlist(void)
891
{
892
    qemu_mutex_unlock(&ram_list.mutex);
893
}
894

    
895
#ifdef __linux__
896

    
897
#include <sys/vfs.h>
898

    
899
#define HUGETLBFS_MAGIC       0x958458f6
900

    
901
static long gethugepagesize(const char *path)
902
{
903
    struct statfs fs;
904
    int ret;
905

    
906
    do {
907
        ret = statfs(path, &fs);
908
    } while (ret != 0 && errno == EINTR);
909

    
910
    if (ret != 0) {
911
        perror(path);
912
        return 0;
913
    }
914

    
915
    if (fs.f_type != HUGETLBFS_MAGIC)
916
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
917

    
918
    return fs.f_bsize;
919
}
920

    
921
static void *file_ram_alloc(RAMBlock *block,
922
                            ram_addr_t memory,
923
                            const char *path)
924
{
925
    char *filename;
926
    char *sanitized_name;
927
    char *c;
928
    void *area;
929
    int fd;
930
#ifdef MAP_POPULATE
931
    int flags;
932
#endif
933
    unsigned long hpagesize;
934

    
935
    hpagesize = gethugepagesize(path);
936
    if (!hpagesize) {
937
        return NULL;
938
    }
939

    
940
    if (memory < hpagesize) {
941
        return NULL;
942
    }
943

    
944
    if (kvm_enabled() && !kvm_has_sync_mmu()) {
945
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
946
        return NULL;
947
    }
948

    
949
    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
950
    sanitized_name = g_strdup(block->mr->name);
951
    for (c = sanitized_name; *c != '\0'; c++) {
952
        if (*c == '/')
953
            *c = '_';
954
    }
955

    
956
    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
957
                               sanitized_name);
958
    g_free(sanitized_name);
959

    
960
    fd = mkstemp(filename);
961
    if (fd < 0) {
962
        perror("unable to create backing store for hugepages");
963
        g_free(filename);
964
        return NULL;
965
    }
966
    unlink(filename);
967
    g_free(filename);
968

    
969
    memory = (memory+hpagesize-1) & ~(hpagesize-1);
970

    
971
    /*
972
     * ftruncate is not supported by hugetlbfs in older
973
     * hosts, so don't bother bailing out on errors.
974
     * If anything goes wrong with it under other filesystems,
975
     * mmap will fail.
976
     */
977
    if (ftruncate(fd, memory))
978
        perror("ftruncate");
979

    
980
#ifdef MAP_POPULATE
981
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
982
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
983
     * to sidestep this quirk.
984
     */
985
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
986
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
987
#else
988
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
989
#endif
990
    if (area == MAP_FAILED) {
991
        perror("file_ram_alloc: can't mmap RAM pages");
992
        close(fd);
993
        return (NULL);
994
    }
995
    block->fd = fd;
996
    return area;
997
}
998
#else
999
static void *file_ram_alloc(RAMBlock *block,
1000
                            ram_addr_t memory,
1001
                            const char *path)
1002
{
1003
    fprintf(stderr, "-mem-path not supported on this host\n");
1004
    exit(1);
1005
}
1006
#endif
1007

    
1008
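/* Choose a ram_addr_t offset for a new block of `size` bytes: scan the
 * existing blocks and return the start of the smallest gap that still
 * fits (best fit).  Aborts if no gap is large enough.
 */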
static ram_addr_t find_ram_offset(ram_addr_t size)
1009
{
1010
    RAMBlock *block, *next_block;
1011
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
1012

    
1013
    assert(size != 0); /* it would hand out same offset multiple times */
1014

    
1015
    if (QTAILQ_EMPTY(&ram_list.blocks))
1016
        return 0;
1017

    
1018
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1019
        ram_addr_t end, next = RAM_ADDR_MAX;
1020

    
1021
        end = block->offset + block->length;
1022

    
1023
        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
1024
            if (next_block->offset >= end) {
1025
                next = MIN(next, next_block->offset);
1026
            }
1027
        }
1028
        if (next - end >= size && next - end < mingap) {
1029
            offset = end;
1030
            mingap = next - end;
1031
        }
1032
    }
1033

    
1034
    if (offset == RAM_ADDR_MAX) {
1035
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1036
                (uint64_t)size);
1037
        abort();
1038
    }
1039

    
1040
    return offset;
1041
}
1042

    
1043
ram_addr_t last_ram_offset(void)
1044
{
1045
    RAMBlock *block;
1046
    ram_addr_t last = 0;
1047

    
1048
    QTAILQ_FOREACH(block, &ram_list.blocks, next)
1049
        last = MAX(last, block->offset + block->length);
1050

    
1051
    return last;
1052
}
1053

    
1054
static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1055
{
1056
    int ret;
1057

    
1058
    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
1059
    if (!qemu_opt_get_bool(qemu_get_machine_opts(),
1060
                           "dump-guest-core", true)) {
1061
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1062
        if (ret) {
1063
            perror("qemu_madvise");
1064
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1065
                            "but dump_guest_core=off specified\n");
1066
        }
1067
    }
1068
}
1069

    
1070
void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1071
{
1072
    RAMBlock *new_block, *block;
1073

    
1074
    new_block = NULL;
1075
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1076
        if (block->offset == addr) {
1077
            new_block = block;
1078
            break;
1079
        }
1080
    }
1081
    assert(new_block);
1082
    assert(!new_block->idstr[0]);
1083

    
1084
    if (dev) {
1085
        char *id = qdev_get_dev_path(dev);
1086
        if (id) {
1087
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
1088
            g_free(id);
1089
        }
1090
    }
1091
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1092

    
1093
    /* This assumes the iothread lock is taken here too.  */
1094
    qemu_mutex_lock_ramlist();
1095
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1096
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
1097
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1098
                    new_block->idstr);
1099
            abort();
1100
        }
1101
    }
1102
    qemu_mutex_unlock_ramlist();
1103
}
1104

    
1105
static int memory_try_enable_merging(void *addr, size_t len)
1106
{
1107
    if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
1108
        /* disabled by the user */
1109
        return 0;
1110
    }
1111

    
1112
    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1113
}
1114

    
1115
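/* Register a RAM block of `size` bytes for `mr` and return its
 * ram_addr_t offset.  When `host` is non-NULL the caller's memory is
 * used as-is; otherwise the block is allocated through Xen, a
 * -mem-path file, or phys_mem_alloc.  The block is inserted into
 * ram_list (sorted largest first), the dirty bitmap is grown to cover
 * it, and the whole range is initially marked dirty.
 */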
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1116
                                   MemoryRegion *mr)
1117
{
1118
    RAMBlock *block, *new_block;
1119

    
1120
    size = TARGET_PAGE_ALIGN(size);
1121
    new_block = g_malloc0(sizeof(*new_block));
1122
    new_block->fd = -1;
1123

    
1124
    /* This assumes the iothread lock is taken here too.  */
1125
    qemu_mutex_lock_ramlist();
1126
    new_block->mr = mr;
1127
    new_block->offset = find_ram_offset(size);
1128
    if (host) {
1129
        new_block->host = host;
1130
        new_block->flags |= RAM_PREALLOC_MASK;
1131
    } else if (xen_enabled()) {
1132
        if (mem_path) {
1133
            fprintf(stderr, "-mem-path not supported with Xen\n");
1134
            exit(1);
1135
        }
1136
        xen_ram_alloc(new_block->offset, size, mr);
1137
    } else {
1138
        if (mem_path) {
1139
            if (phys_mem_alloc != qemu_anon_ram_alloc) {
1140
                /*
1141
                 * file_ram_alloc() needs to allocate just like
1142
                 * phys_mem_alloc, but we haven't bothered to provide
1143
                 * a hook there.
1144
                 */
1145
                fprintf(stderr,
1146
                        "-mem-path not supported with this accelerator\n");
1147
                exit(1);
1148
            }
1149
            new_block->host = file_ram_alloc(new_block, size, mem_path);
1150
        }
1151
        if (!new_block->host) {
1152
            new_block->host = phys_mem_alloc(size);
1153
            memory_try_enable_merging(new_block->host, size);
1154
        }
1155
    }
1156
    new_block->length = size;
1157

    
1158
    /* Keep the list sorted from biggest to smallest block.  */
1159
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1160
        if (block->length < new_block->length) {
1161
            break;
1162
        }
1163
    }
1164
    if (block) {
1165
        QTAILQ_INSERT_BEFORE(block, new_block, next);
1166
    } else {
1167
        QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
1168
    }
1169
    ram_list.mru_block = NULL;
1170

    
1171
    ram_list.version++;
1172
    qemu_mutex_unlock_ramlist();
1173

    
1174
    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
1175
                                       last_ram_offset() >> TARGET_PAGE_BITS);
1176
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
1177
           0, size >> TARGET_PAGE_BITS);
1178
    cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);
1179

    
1180
    qemu_ram_setup_dump(new_block->host, size);
1181
    qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);
1182

    
1183
    if (kvm_enabled())
1184
        kvm_setup_guest_memory(new_block->host, size);
1185

    
1186
    return new_block->offset;
1187
}
1188

    
1189
ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
1190
{
1191
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
1192
}
1193

    
1194
void qemu_ram_free_from_ptr(ram_addr_t addr)
1195
{
1196
    RAMBlock *block;
1197

    
1198
    /* This assumes the iothread lock is taken here too.  */
1199
    qemu_mutex_lock_ramlist();
1200
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1201
        if (addr == block->offset) {
1202
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
1203
            ram_list.mru_block = NULL;
1204
            ram_list.version++;
1205
            g_free(block);
1206
            break;
1207
        }
1208
    }
1209
    qemu_mutex_unlock_ramlist();
1210
}
1211

    
1212
void qemu_ram_free(ram_addr_t addr)
1213
{
1214
    RAMBlock *block;
1215

    
1216
    /* This assumes the iothread lock is taken here too.  */
1217
    qemu_mutex_lock_ramlist();
1218
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1219
        if (addr == block->offset) {
1220
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
1221
            ram_list.mru_block = NULL;
1222
            ram_list.version++;
1223
            if (block->flags & RAM_PREALLOC_MASK) {
1224
                ;
1225
            } else if (xen_enabled()) {
1226
                xen_invalidate_map_cache_entry(block->host);
1227
            } else if (block->fd >= 0) {
1228
                munmap(block->host, block->length);
1229
                close(block->fd);
1230
            } else {
1231
                qemu_anon_ram_free(block->host, block->length);
1232
            }
1233
            g_free(block);
1234
            break;
1235
        }
1236
    }
1237
    qemu_mutex_unlock_ramlist();
1238

    
1239
}
1240

    
1241
#ifndef _WIN32
1242
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1243
{
1244
    RAMBlock *block;
1245
    ram_addr_t offset;
1246
    int flags;
1247
    void *area, *vaddr;
1248

    
1249
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1250
        offset = addr - block->offset;
1251
        if (offset < block->length) {
1252
            vaddr = block->host + offset;
1253
            if (block->flags & RAM_PREALLOC_MASK) {
1254
                ;
1255
            } else if (xen_enabled()) {
1256
                abort();
1257
            } else {
1258
                flags = MAP_FIXED;
1259
                munmap(vaddr, length);
1260
                if (block->fd >= 0) {
1261
#ifdef MAP_POPULATE
1262
                    flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
1263
                        MAP_PRIVATE;
1264
#else
1265
                    flags |= MAP_PRIVATE;
1266
#endif
1267
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1268
                                flags, block->fd, offset);
1269
                } else {
1270
                    /*
1271
                     * Remap needs to match alloc.  Accelerators that
1272
                     * set phys_mem_alloc never remap.  If they did,
1273
                     * we'd need a remap hook here.
1274
                     */
1275
                    assert(phys_mem_alloc == qemu_anon_ram_alloc);
1276

    
1277
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1278
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1279
                                flags, -1, 0);
1280
                }
1281
                if (area != vaddr) {
1282
                    fprintf(stderr, "Could not remap addr: "
1283
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
1284
                            length, addr);
1285
                    exit(1);
1286
                }
1287
                memory_try_enable_merging(vaddr, length);
1288
                qemu_ram_setup_dump(vaddr, length);
1289
            }
1290
            return;
1291
        }
1292
    }
1293
}
1294
#endif /* !_WIN32 */
1295

    
1296
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
1297
{
1298
    RAMBlock *block;
1299

    
1300
    /* The list is protected by the iothread lock here.  */
1301
    block = ram_list.mru_block;
1302
    if (block && addr - block->offset < block->length) {
1303
        goto found;
1304
    }
1305
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1306
        if (addr - block->offset < block->length) {
1307
            goto found;
1308
        }
1309
    }
1310

    
1311
    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1312
    abort();
1313

    
1314
found:
1315
    ram_list.mru_block = block;
1316
    return block;
1317
}
1318

    
1319
/* Return a host pointer to ram allocated with qemu_ram_alloc.
1320
   With the exception of the softmmu code in this file, this should
1321
   only be used for local memory (e.g. video ram) that the device owns,
1322
   and knows it isn't going to access beyond the end of the block.
1323

1324
   It should not be used for general purpose DMA.
1325
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1326
 */
1327
void *qemu_get_ram_ptr(ram_addr_t addr)
1328
{
1329
    RAMBlock *block = qemu_get_ram_block(addr);
1330

    
1331
    if (xen_enabled()) {
1332
        /* We need to check if the requested address is in the RAM
1333
         * because we don't want to map the entire memory in QEMU.
1334
         * In that case just map until the end of the page.
1335
         */
1336
        if (block->offset == 0) {
1337
            return xen_map_cache(addr, 0, 0);
1338
        } else if (block->host == NULL) {
1339
            block->host =
1340
                xen_map_cache(block->offset, block->length, 1);
1341
        }
1342
    }
1343
    return block->host + (addr - block->offset);
1344
}
1345

    
1346
/* Return a host pointer to ram allocated with qemu_ram_alloc.  Same as
1347
 * qemu_get_ram_ptr but do not touch ram_list.mru_block.
1348
 *
1349
 * ??? Is this still necessary?
1350
 */
1351
static void *qemu_safe_ram_ptr(ram_addr_t addr)
1352
{
1353
    RAMBlock *block;
1354

    
1355
    /* The list is protected by the iothread lock here.  */
1356
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1357
        if (addr - block->offset < block->length) {
1358
            if (xen_enabled()) {
1359
                /* We need to check if the requested address is in the RAM
1360
                 * because we don't want to map the entire memory in QEMU.
1361
                 * In that case just map until the end of the page.
1362
                 */
1363
                if (block->offset == 0) {
1364
                    return xen_map_cache(addr, 0, 0);
1365
                } else if (block->host == NULL) {
1366
                    block->host =
1367
                        xen_map_cache(block->offset, block->length, 1);
1368
                }
1369
            }
1370
            return block->host + (addr - block->offset);
1371
        }
1372
    }
1373

    
1374
    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1375
    abort();
1376

    
1377
    return NULL;
1378
}
1379

    
1380
/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1381
 * but takes a size argument */
1382
static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
1383
{
1384
    if (*size == 0) {
1385
        return NULL;
1386
    }
1387
    if (xen_enabled()) {
1388
        return xen_map_cache(addr, *size, 1);
1389
    } else {
1390
        RAMBlock *block;
1391

    
1392
        QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1393
            if (addr - block->offset < block->length) {
1394
                if (addr - block->offset + *size > block->length)
1395
                    *size = block->length - addr + block->offset;
1396
                return block->host + (addr - block->offset);
1397
            }
1398
        }
1399

    
1400
        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1401
        abort();
1402
    }
1403
}
1404

    
1405
/* Some of the softmmu routines need to translate from a host pointer
1406
   (typically a TLB entry) back to a ram offset.  */
1407
MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
1408
{
1409
    RAMBlock *block;
1410
    uint8_t *host = ptr;
1411

    
1412
    if (xen_enabled()) {
1413
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
1414
        return qemu_get_ram_block(*ram_addr)->mr;
1415
    }
1416

    
1417
    block = ram_list.mru_block;
1418
    if (block && block->host && host - block->host < block->length) {
1419
        goto found;
1420
    }
1421

    
1422
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1423
        /* This case happens when the block is not mapped. */
1424
        if (block->host == NULL) {
1425
            continue;
1426
        }
1427
        if (host - block->host < block->length) {
1428
            goto found;
1429
        }
1430
    }
1431

    
1432
    return NULL;
1433

    
1434
found:
1435
    *ram_addr = block->offset + (host - block->host);
1436
    return block->mr;
1437
}
1438

    
1439
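/* Write handler for pages whose dirty bits are not all set (the
 * "notdirty" TLB trick).  It invalidates any translated code derived
 * from the page, performs the store, updates the dirty flags, and once
 * the page is fully dirty lets future writes go straight to RAM by
 * marking the TLB entry dirty again.
 */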
static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
1440
                               uint64_t val, unsigned size)
1441
{
1442
    int dirty_flags;
1443
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
1444
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1445
        tb_invalidate_phys_page_fast(ram_addr, size);
1446
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
1447
    }
1448
    switch (size) {
1449
    case 1:
1450
        stb_p(qemu_get_ram_ptr(ram_addr), val);
1451
        break;
1452
    case 2:
1453
        stw_p(qemu_get_ram_ptr(ram_addr), val);
1454
        break;
1455
    case 4:
1456
        stl_p(qemu_get_ram_ptr(ram_addr), val);
1457
        break;
1458
    default:
1459
        abort();
1460
    }
1461
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1462
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
1463
    /* we remove the notdirty callback only if the code has been
1464
       flushed */
1465
    if (dirty_flags == 0xff) {
1466
        CPUArchState *env = current_cpu->env_ptr;
1467
        tlb_set_dirty(env, env->mem_io_vaddr);
1468
    }
1469
}
1470

    
1471
static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1472
                                 unsigned size, bool is_write)
1473
{
1474
    return is_write;
1475
}
1476

    
1477
static const MemoryRegionOps notdirty_mem_ops = {
1478
    .write = notdirty_mem_write,
1479
    .valid.accepts = notdirty_mem_accepts,
1480
    .endianness = DEVICE_NATIVE_ENDIAN,
1481
};
1482

    
1483
/* Generate a debug exception if a watchpoint has been hit.  */
1484
static void check_watchpoint(int offset, int len_mask, int flags)
1485
{
1486
    CPUArchState *env = current_cpu->env_ptr;
1487
    target_ulong pc, cs_base;
1488
    target_ulong vaddr;
1489
    CPUWatchpoint *wp;
1490
    int cpu_flags;
1491

    
1492
    if (env->watchpoint_hit) {
1493
        /* We re-entered the check after replacing the TB. Now raise
1494
         * the debug interrupt so that it will trigger after the
1495
         * current instruction. */
1496
        cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
1497
        return;
1498
    }
1499
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
1500
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1501
        if ((vaddr == (wp->vaddr & len_mask) ||
1502
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
1503
            wp->flags |= BP_WATCHPOINT_HIT;
1504
            if (!env->watchpoint_hit) {
1505
                env->watchpoint_hit = wp;
1506
                tb_check_watchpoint(env);
1507
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
1508
                    env->exception_index = EXCP_DEBUG;
1509
                    cpu_loop_exit(env);
1510
                } else {
1511
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
1512
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
1513
                    cpu_resume_from_signal(env, NULL);
1514
                }
1515
            }
1516
        } else {
1517
            wp->flags &= ~BP_WATCHPOINT_HIT;
1518
        }
1519
    }
1520
}
1521

    
1522
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
1523
   so these check for a hit then pass through to the normal out-of-line
1524
   phys routines.  */
1525
static uint64_t watch_mem_read(void *opaque, hwaddr addr,
1526
                               unsigned size)
1527
{
1528
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
1529
    switch (size) {
1530
    case 1: return ldub_phys(addr);
1531
    case 2: return lduw_phys(addr);
1532
    case 4: return ldl_phys(addr);
1533
    default: abort();
1534
    }
1535
}
1536

    
1537
static void watch_mem_write(void *opaque, hwaddr addr,
1538
                            uint64_t val, unsigned size)
1539
{
1540
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
1541
    switch (size) {
1542
    case 1:
1543
        stb_phys(addr, val);
1544
        break;
1545
    case 2:
1546
        stw_phys(addr, val);
1547
        break;
1548
    case 4:
1549
        stl_phys(addr, val);
1550
        break;
1551
    default: abort();
1552
    }
1553
}
1554

    
1555
static const MemoryRegionOps watch_mem_ops = {
1556
    .read = watch_mem_read,
1557
    .write = watch_mem_write,
1558
    .endianness = DEVICE_NATIVE_ENDIAN,
1559
};
1560

    
1561
static uint64_t subpage_read(void *opaque, hwaddr addr,
1562
                             unsigned len)
1563
{
1564
    subpage_t *subpage = opaque;
1565
    uint8_t buf[4];
1566

    
1567
#if defined(DEBUG_SUBPAGE)
1568
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx "\n", __func__,
1569
           subpage, len, addr);
1570
#endif
1571
    address_space_read(subpage->as, addr + subpage->base, buf, len);
1572
    switch (len) {
1573
    case 1:
1574
        return ldub_p(buf);
1575
    case 2:
1576
        return lduw_p(buf);
1577
    case 4:
1578
        return ldl_p(buf);
1579
    default:
1580
        abort();
1581
    }
1582
}
1583

    
1584
static void subpage_write(void *opaque, hwaddr addr,
1585
                          uint64_t value, unsigned len)
1586
{
1587
    subpage_t *subpage = opaque;
1588
    uint8_t buf[4];
1589

    
1590
#if defined(DEBUG_SUBPAGE)
1591
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx
1592
           " value %"PRIx64"\n",
1593
           __func__, subpage, len, addr, value);
1594
#endif
1595
    switch (len) {
1596
    case 1:
1597
        stb_p(buf, value);
1598
        break;
1599
    case 2:
1600
        stw_p(buf, value);
1601
        break;
1602
    case 4:
1603
        stl_p(buf, value);
1604
        break;
1605
    default:
1606
        abort();
1607
    }
1608
    address_space_write(subpage->as, addr + subpage->base, buf, len);
1609
}
1610

    
1611
static bool subpage_accepts(void *opaque, hwaddr addr,
1612
                            unsigned size, bool is_write)
1613
{
1614
    subpage_t *subpage = opaque;
1615
#if defined(DEBUG_SUBPAGE)
1616
    printf("%s: subpage %p %c len %d addr " TARGET_FMT_plx "\n",
1617
           __func__, subpage, is_write ? 'w' : 'r', size, addr);
1618
#endif
1619

    
1620
    return address_space_access_valid(subpage->as, addr + subpage->base,
1621
                                      size, is_write);
1622
}
1623

    
1624
static const MemoryRegionOps subpage_ops = {
1625
    .read = subpage_read,
1626
    .write = subpage_write,
1627
    .valid.accepts = subpage_accepts,
1628
    .endianness = DEVICE_NATIVE_ENDIAN,
1629
};
1630

    
1631
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
1632
                             uint16_t section)
1633
{
1634
    int idx, eidx;
1635

    
1636
    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1637
        return -1;
1638
    idx = SUBPAGE_IDX(start);
1639
    eidx = SUBPAGE_IDX(end);
1640
#if defined(DEBUG_SUBPAGE)
1641
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
1642
           mmio, start, end, idx, eidx, section);
1643
#endif
1644
    for (; idx <= eidx; idx++) {
1645
        mmio->sub_section[idx] = section;
1646
    }
1647

    
1648
    return 0;
1649
}
1650

    
1651
static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
1652
{
1653
    subpage_t *mmio;
1654

    
1655
    mmio = g_malloc0(sizeof(subpage_t));
1656

    
1657
    mmio->as = as;
1658
    mmio->base = base;
1659
    memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
1660
                          "subpage", TARGET_PAGE_SIZE);
1661
    mmio->iomem.subpage = true;
1662
#if defined(DEBUG_SUBPAGE)
1663
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
1664
           mmio, base, TARGET_PAGE_SIZE);
1665
#endif
1666
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
1667

    
1668
    return mmio;
1669
}
1670

    
1671
static uint16_t dummy_section(MemoryRegion *mr)
{
    MemoryRegionSection section = {
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = int128_2_64(),
    };

    return phys_section_add(&section);
}

MemoryRegion *iotlb_to_region(hwaddr index)
{
    return address_space_memory.dispatch->sections[index & ~TARGET_PAGE_MASK].mr;
}

static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, "rom", UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
                          "unassigned", UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
                          "notdirty", UINT64_MAX);
    memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
                          "watch", UINT64_MAX);
}

static void mem_begin(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);

    d->phys_map  = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
    d->as = as;
    as->next_dispatch = d;
}

static void mem_commit(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *cur = as->dispatch;
    AddressSpaceDispatch *next = as->next_dispatch;

    next->nodes = next_map.nodes;
    next->sections = next_map.sections;

    as->dispatch = next;
    g_free(cur);
}

static void core_begin(MemoryListener *listener)
{
    uint16_t n;

    prev_map = g_new(PhysPageMap, 1);
    *prev_map = next_map;

    memset(&next_map, 0, sizeof(next_map));
    n = dummy_section(&io_mem_unassigned);
    assert(n == PHYS_SECTION_UNASSIGNED);
    n = dummy_section(&io_mem_notdirty);
    assert(n == PHYS_SECTION_NOTDIRTY);
    n = dummy_section(&io_mem_rom);
    assert(n == PHYS_SECTION_ROM);
    n = dummy_section(&io_mem_watch);
    assert(n == PHYS_SECTION_WATCH);
}

/* This listener's commit runs after the other AddressSpaceDispatch listeners'.
 * All AddressSpaceDispatch instances have switched to the next map.
 */
static void core_commit(MemoryListener *listener)
{
    phys_sections_free(prev_map);
}

static void tcg_commit(MemoryListener *listener)
{
    CPUState *cpu;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    CPU_FOREACH(cpu) {
        CPUArchState *env = cpu->env_ptr;

        tlb_flush(env, 1);
    }
}

static void core_log_global_start(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(1);
}

static void core_log_global_stop(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(0);
}

static MemoryListener core_memory_listener = {
    .begin = core_begin,
    .commit = core_commit,
    .log_global_start = core_log_global_start,
    .log_global_stop = core_log_global_stop,
    .priority = 1,
};

static MemoryListener tcg_memory_listener = {
    .commit = tcg_commit,
};

void address_space_init_dispatch(AddressSpace *as)
{
    as->dispatch = NULL;
    as->dispatch_listener = (MemoryListener) {
        .begin = mem_begin,
        .commit = mem_commit,
        .region_add = mem_add,
        .region_nop = mem_add,
        .priority = 0,
    };
    memory_listener_register(&as->dispatch_listener, as);
}

void address_space_destroy_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = as->dispatch;

    memory_listener_unregister(&as->dispatch_listener);
    g_free(d);
    as->dispatch = NULL;
}

static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));
    memory_region_init(system_memory, NULL, "system", INT64_MAX);
    address_space_init(&address_space_memory, system_memory, "memory");

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
                          65536);
    address_space_init(&address_space_io, system_io, "I/O");

    memory_listener_register(&core_memory_listener, &address_space_memory);
    if (tcg_enabled()) {
        memory_listener_register(&tcg_memory_listener, &address_space_memory);
    }
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}
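
/*
 * Usage sketch (illustrative only, not part of upstream exec.c): board code
 * normally obtains the root region with get_system_memory() and maps RAM or
 * device regions into it.  The region name "example.ram", the 64 MiB size
 * and the base address 0 below are arbitrary.
 *
 *     MemoryRegion *ram = g_new(MemoryRegion, 1);
 *
 *     memory_region_init_ram(ram, NULL, "example.ram", 64 * 1024 * 1024);
 *     vmstate_register_ram_global(ram);
 *     memory_region_add_subregion(get_system_memory(), 0, ram);
 */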

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else

static void invalidate_and_set_dirty(hwaddr addr,
                                     hwaddr length)
{
    if (!cpu_physical_memory_is_dirty(addr)) {
        /* invalidate code */
        tb_invalidate_phys_page_range(addr, addr + length, 0);
        /* set dirty bit */
        cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
    }
    xen_modified_memory(addr, length);
}

static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (memory_region_is_ram(mr)) {
        return !(is_write && mr->readonly);
    }
    if (memory_region_is_romd(mr)) {
        return !is_write;
    }

    return false;
}

static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
{
    unsigned access_size_max = mr->ops->valid.max_access_size;

    /* Regions are assumed to support 1-4 byte accesses unless
       otherwise specified.  */
    if (access_size_max == 0) {
        access_size_max = 4;
    }

    /* Bound the maximum access by the alignment of the address.  */
    if (!mr->ops->impl.unaligned) {
        unsigned align_size_max = addr & -addr;
        if (align_size_max != 0 && align_size_max < access_size_max) {
            access_size_max = align_size_max;
        }
    }

    /* Don't attempt accesses larger than the maximum.  */
    if (l > access_size_max) {
        l = access_size_max;
    }
    if (l & (l - 1)) {
        l = 1 << (qemu_fls(l) - 1);
    }

    return l;
}
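
/*
 * Worked example (illustrative): for an MMIO region using the default
 * 1-4 byte limits, an 8-byte access at address 0x1002 is first capped to
 * 4 bytes by access_size_max and then to 2 bytes by the address alignment
 * (0x1002 & -0x1002 == 0x2), so address_space_rw() below ends up issuing
 * 2-byte device accesses for it.
 */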

bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
                      int len, bool is_write)
{
    hwaddr l;
    uint8_t *ptr;
    uint64_t val;
    hwaddr addr1;
    MemoryRegion *mr;
    bool error = false;

    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, is_write);

        if (is_write) {
            if (!memory_access_is_direct(mr, is_write)) {
                l = memory_access_size(mr, l, addr1);
                /* XXX: could force current_cpu to NULL to avoid
                   potential bugs */
                switch (l) {
                case 8:
                    /* 64 bit write access */
                    val = ldq_p(buf);
                    error |= io_mem_write(mr, addr1, val, 8);
                    break;
                case 4:
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    error |= io_mem_write(mr, addr1, val, 4);
                    break;
                case 2:
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    error |= io_mem_write(mr, addr1, val, 2);
                    break;
                case 1:
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    error |= io_mem_write(mr, addr1, val, 1);
                    break;
                default:
                    abort();
                }
            } else {
                addr1 += memory_region_get_ram_addr(mr);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(addr1, l);
            }
        } else {
            if (!memory_access_is_direct(mr, is_write)) {
                /* I/O case */
                l = memory_access_size(mr, l, addr1);
                switch (l) {
                case 8:
                    /* 64 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 8);
                    stq_p(buf, val);
                    break;
                case 4:
                    /* 32 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 4);
                    stl_p(buf, val);
                    break;
                case 2:
                    /* 16 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 2);
                    stw_p(buf, val);
                    break;
                case 1:
                    /* 8 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 1);
                    stb_p(buf, val);
                    break;
                default:
                    abort();
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }

    return error;
}

bool address_space_write(AddressSpace *as, hwaddr addr,
                         const uint8_t *buf, int len)
{
    return address_space_rw(as, addr, (uint8_t *)buf, len, true);
}

bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
{
    return address_space_rw(as, addr, buf, len, false);
}
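
/*
 * Usage sketch (illustrative only): reading a 32-bit little-endian value
 * from guest physical memory; the address 0x1000 is arbitrary.  Note that
 * address_space_rw() and its wrappers return true on error, so the buffer
 * contents are only meaningful when the call returns false.
 *
 *     uint8_t buf[4];
 *
 *     if (!address_space_read(&address_space_memory, 0x1000,
 *                             buf, sizeof(buf))) {
 *         uint32_t val = ldl_le_p(buf);
 *         ...
 *     }
 */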

void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, buf, len, is_write);
}

/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(hwaddr addr,
                                   const uint8_t *buf, int len)
{
    hwaddr l;
    uint8_t *ptr;
    hwaddr addr1;
    MemoryRegion *mr;

    while (len > 0) {
        l = len;
        mr = address_space_translate(&address_space_memory,
                                     addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            /* do nothing */
        } else {
            addr1 += memory_region_get_ram_addr(mr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            invalidate_and_set_dirty(addr1, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
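
/*
 * Usage sketch (illustrative only): firmware loaders use this helper to
 * copy an image into the guest because it also writes regions that are
 * read-only from the guest's point of view; "blob", "size" and the
 * address below are placeholders.
 *
 *     cpu_physical_memory_write_rom(0xfffc0000, blob, size);
 */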

typedef struct {
    MemoryRegion *mr;
    void *buffer;
    hwaddr addr;
    hwaddr len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

static void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}

bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
{
    MemoryRegion *mr;
    hwaddr l, xlat;

    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write)) {
                return false;
            }
        }

        len -= l;
        addr += l;
    }
    return true;
}
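
/*
 * Usage sketch (illustrative only): a device model can pre-check a DMA
 * window before touching guest memory; "dma_addr" and "dma_len" are
 * placeholders.
 *
 *     if (!address_space_access_valid(&address_space_memory, dma_addr,
 *                                     dma_len, true)) {
 *         ... flag a DMA error instead of performing the transfer ...
 *     }
 */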

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr done = 0;
    hwaddr l, xlat, base;
    MemoryRegion *mr, *this_mr;
    ram_addr_t raddr;

    if (len == 0) {
        return NULL;
    }

    l = len;
    mr = address_space_translate(as, addr, &xlat, &l, is_write);
    if (!memory_access_is_direct(mr, is_write)) {
        if (bounce.buffer) {
            return NULL;
        }
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
        bounce.addr = addr;
        bounce.len = l;

        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            address_space_read(as, addr, bounce.buffer, l);
        }

        *plen = l;
        return bounce.buffer;
    }

    base = xlat;
    raddr = memory_region_get_ram_addr(mr);

    for (;;) {
        len -= l;
        addr += l;
        done += l;
        if (len == 0) {
            break;
        }

        l = len;
        this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (this_mr != mr || xlat != base + done) {
            break;
        }
    }

    memory_region_ref(mr);
    *plen = done;
    return qemu_ram_ptr_length(raddr + base, plen);
}

/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = qemu_ram_addr_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                invalidate_and_set_dirty(addr1, l);
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    cpu_notify_map_clients();
}

void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}
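
/*
 * Usage sketch (illustrative only, error handling trimmed): the usual
 * pattern is map -> access -> unmap, falling back to
 * cpu_register_map_client() when the single bounce buffer is already in
 * use and address_space_map() therefore returns NULL.  "addr", "len",
 * "opaque" and "retry_cb" are placeholders.
 *
 *     hwaddr plen = len;
 *     void *p = address_space_map(&address_space_memory, addr, &plen, true);
 *
 *     if (!p) {
 *         cpu_register_map_client(opaque, retry_cb);  // try again later
 *         return;
 *     }
 *     memset(p, 0, plen);                             // direct host access
 *     address_space_unmap(&address_space_memory, p, plen, true, plen);
 */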

/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 false);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 8);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
uint32_t ldub_phys(hwaddr addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(hwaddr addr,
                                          enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 false);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(hwaddr addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        io_mem_write(mr, addr1, val, 4);
    } else {
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
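
/*
 * Usage sketch (illustrative only): target MMU helpers use this to update
 * an accessed/dirty flag inside a guest page table entry without marking
 * the page as containing modified code; "pte", "pte_addr" and the flag
 * name are placeholders.
 *
 *     pte |= PG_ACCESSED_MASK;
 *     stl_phys_notdirty(pte_addr, pte);
 */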

/* warning: addr must be aligned */
static inline void stl_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(mr, addr1, val, 4);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 4);
    }
}

void stl_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stb_phys(hwaddr addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(mr, addr1, val, 2);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 2);
    }
}

void stw_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(hwaddr addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(cpu, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif

#if !defined(CONFIG_USER_ONLY)

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#endif

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;

    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    return !(memory_region_is_ram(mr) ||
             memory_region_is_romd(mr));
}

void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        func(block->host, block->offset, block->length, opaque);
    }
}
#endif