Revision 99a0949b exec.c
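This revision mechanically renames typedefs throughout exec.c: spinlock_t becomes a_spinlock, ram_addr_t becomes a_ram_addr, target_phys_addr_t becomes a_target_phys_addr, and subpage_t (struct tag subpage_t, now struct subpage) becomes a_subpage. The likely motivation is to move these names out of the *_t suffix space reserved by POSIX; no behavior changes.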

--- a/exec.c
+++ b/exec.c
@@ -83,7 +83,7 @@
 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
 static int nb_tbs;
 /* any access to the tbs or the page table must use this lock */
-spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
+a_spinlock tb_lock = SPIN_LOCK_UNLOCKED;
 
 #if defined(__arm__) || defined(__sparc_v9__)
 /* The prologue must be reachable with a direct jump. ARM and Sparc64
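Note: the rename presumably rests on matching typedef changes in the headers (exec-all.h and cpu-common.h in trees of this vintage). The following is a hypothetical sketch of that header side, not the actual header diff; the real underlying types of a_ram_addr and a_target_phys_addr depend on the build configuration:

    /* Hypothetical header-side sketch of the renames this diff relies on. */
    typedef int a_spinlock;                 /* was spinlock_t */
    #define SPIN_LOCK_UNLOCKED 0
    typedef unsigned long a_ram_addr;       /* was ram_addr_t; width is build-dependent */
    typedef uint64_t a_target_phys_addr;    /* was target_phys_addr_t; 32 or 64 bit per target */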
@@ -115,8 +115,8 @@
 
 typedef struct RAMBlock {
     uint8_t *host;
-    ram_addr_t offset;
-    ram_addr_t length;
+    a_ram_addr offset;
+    a_ram_addr length;
     struct RAMBlock *next;
 } RAMBlock;
 
@@ -124,7 +124,7 @@
 /* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
    then we can no longer assume contiguous ram offsets, and external uses
    of this variable will break.  */
-ram_addr_t last_ram_offset;
+a_ram_addr last_ram_offset;
 #endif
 
 CPUState *first_cpu;
@@ -153,8 +153,8 @@
 
 typedef struct PhysPageDesc {
     /* offset in host memory of the page + io_index in the low bits */
-    ram_addr_t phys_offset;
-    ram_addr_t region_offset;
+    a_ram_addr phys_offset;
+    a_ram_addr region_offset;
 } PhysPageDesc;
 
 #define L2_BITS 10
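Note: the comment on phys_offset is worth unpacking: the page-aligned part is the ram offset, while the low bits encode an io_index. This is exactly how later code in this file decodes it:

    /* Decoding pattern used later in this diff (see cpu_physical_memory_rw): */
    unsigned long pd = p->phys_offset;
    int io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    /* pd & TARGET_PAGE_MASK is the page-aligned ram offset */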
@@ -203,13 +203,13 @@
 static int tb_phys_invalidate_count;
 
 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
-typedef struct subpage_t {
-    target_phys_addr_t base;
+typedef struct subpage {
+    a_target_phys_addr base;
     CPUReadMemoryFunc * const *mem_read[TARGET_PAGE_SIZE][4];
     CPUWriteMemoryFunc * const *mem_write[TARGET_PAGE_SIZE][4];
     void *opaque[TARGET_PAGE_SIZE][2][4];
-    ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
-} subpage_t;
+    a_ram_addr region_offset[TARGET_PAGE_SIZE][2][4];
+} a_subpage;
 
 #ifdef _WIN32
 static void map_exec(void *addr, long size)
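Note: SUBPAGE_IDX strips the page-aligned bits, leaving the in-page offset that indexes the per-byte handler tables above; the second index is the access size, as the subpage_readlen/subpage_writelen helpers later in this diff show. A worked example, assuming 4 KiB target pages:

    /* Assuming TARGET_PAGE_SIZE == 0x1000, so TARGET_PAGE_MASK == ~0xfff: */
    unsigned idx = SUBPAGE_IDX(0x12345);   /* == 0x345, the offset within the page */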
@@ -346,7 +346,7 @@
     return p + (index & (L2_SIZE - 1));
 }
 
-static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
+static PhysPageDesc *phys_page_find_alloc(a_target_phys_addr index, int alloc)
 {
     void **lp, **p;
     PhysPageDesc *pd;
@@ -385,14 +385,14 @@
     return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
 }
 
-static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
+static inline PhysPageDesc *phys_page_find(a_target_phys_addr index)
 {
     return phys_page_find_alloc(index, 0);
 }
 
 #if !defined(CONFIG_USER_ONLY)
-static void tlb_protect_code(ram_addr_t ram_addr);
-static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
+static void tlb_protect_code(a_ram_addr ram_addr);
+static void tlb_unprotect_code_phys(CPUState *env, a_ram_addr ram_addr,
                                     target_ulong vaddr);
 #define mmap_lock() do { } while(0)
 #define mmap_unlock() do { } while(0)
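Note: phys_page_find is the read-only lookup over the two-level table that phys_page_find_alloc can populate. The caller pattern seen throughout the rest of this diff:

    PhysPageDesc *p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    unsigned long pd = p ? p->phys_offset : IO_MEM_UNASSIGNED;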
@@ -766,7 +766,7 @@
     CPUState *env;
     PageDesc *p;
     unsigned int h, n1;
-    target_phys_addr_t phys_pc;
+    a_target_phys_addr phys_pc;
     TranslationBlock *tb1, *tb2;
 
     /* remove the TB from the hash list */
@@ -914,7 +914,7 @@
    the same physical page. 'is_cpu_write_access' should be true if called
    from a real cpu write access: the virtual CPU will exit the current
    TB if code is modified inside this TB. */
-void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
+void tb_invalidate_phys_page_range(a_target_phys_addr start, a_target_phys_addr end,
                                    int is_cpu_write_access)
 {
     TranslationBlock *tb, *tb_next, *saved_tb;
@@ -1021,7 +1021,7 @@
 }
 
 /* len must be <= 8 and start must be a multiple of len */
-static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
+static inline void tb_invalidate_phys_page_fast(a_target_phys_addr start, int len)
 {
     PageDesc *p;
     int offset, b;
@@ -1048,7 +1048,7 @@
 }
 
 #if !defined(CONFIG_SOFTMMU)
-static void tb_invalidate_phys_page(target_phys_addr_t addr,
+static void tb_invalidate_phys_page(a_target_phys_addr addr,
                                     unsigned long pc, void *puc)
 {
     TranslationBlock *tb;
@@ -1310,9 +1310,9 @@
 #if defined(TARGET_HAS_ICE)
 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
 {
-    target_phys_addr_t addr;
+    a_target_phys_addr addr;
     target_ulong pd;
-    ram_addr_t ram_addr;
+    a_ram_addr ram_addr;
     PhysPageDesc *p;
 
     addr = cpu_get_phys_page_debug(env, pc);
@@ -1533,7 +1533,7 @@
        signals are used primarily to interrupt blocking syscalls.  */
 #else
     TranslationBlock *tb;
-    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
+    static a_spinlock interrupt_lock = SPIN_LOCK_UNLOCKED;
 
     tb = env->current_tb;
     /* if the cpu is currently executing code, we must unlink it and
@@ -1810,7 +1810,7 @@
 
 /* update the TLBs so that writes to code in the virtual page 'addr'
    can be detected */
-static void tlb_protect_code(ram_addr_t ram_addr)
+static void tlb_protect_code(a_ram_addr ram_addr)
 {
     cpu_physical_memory_reset_dirty(ram_addr,
                                     ram_addr + TARGET_PAGE_SIZE,
@@ -1819,7 +1819,7 @@
 
 /* update the TLB so that writes in physical page 'phys_addr' are no longer
    tested for self modifying code */
-static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
+static void tlb_unprotect_code_phys(CPUState *env, a_ram_addr ram_addr,
                                     target_ulong vaddr)
 {
     phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
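Note: both helpers work on phys_ram_dirty, one status byte per physical page. Per the comment above, setting CODE_DIRTY_FLAG records that writes to the page no longer need the self-modifying-code check. A sketch of the indexing, with a 4 KiB page size assumed:

    a_ram_addr ram_addr = 0x12345;                          /* example address */
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
    /* with TARGET_PAGE_BITS == 12 (assumed), this is byte 0x12 of the array */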
@@ -1838,7 +1838,7 @@
 }
 
 /* Note: start and end must be within the same ram block.  */
-void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
+void cpu_physical_memory_reset_dirty(a_ram_addr start, a_ram_addr end,
                                      int dirty_flags)
 {
     CPUState *env;
@@ -1892,8 +1892,8 @@
     return in_migration;
 }
 
-int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
-                                   target_phys_addr_t end_addr)
+int cpu_physical_sync_dirty_bitmap(a_target_phys_addr start_addr,
+                                   a_target_phys_addr end_addr)
 {
     int ret = 0;
 
@@ -1904,7 +1904,7 @@
 
 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
 {
-    ram_addr_t ram_addr;
+    a_ram_addr ram_addr;
     void *p;
 
     if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
@@ -1952,7 +1952,7 @@
    (can only happen in non SOFTMMU mode for I/O pages or pages
    conflicting with the host address space). */
 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
-                      target_phys_addr_t paddr, int prot,
+                      a_target_phys_addr paddr, int prot,
                       int mmu_idx, int is_softmmu)
 {
     PhysPageDesc *p;
@@ -1960,11 +1960,11 @@
     unsigned int index;
     target_ulong address;
     target_ulong code_address;
-    target_phys_addr_t addend;
+    a_target_phys_addr addend;
     int ret;
     CPUTLBEntry *te;
     CPUWatchpoint *wp;
-    target_phys_addr_t iotlb;
+    a_target_phys_addr iotlb;
 
     p = phys_page_find(paddr >> TARGET_PAGE_BITS);
     if (!p) {
@@ -2061,7 +2061,7 @@
 }
 
 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
-                      target_phys_addr_t paddr, int prot,
+                      a_target_phys_addr paddr, int prot,
                       int mmu_idx, int is_softmmu)
 {
     return 0;
@@ -2267,10 +2267,10 @@
 
 #if !defined(CONFIG_USER_ONLY)
 
-static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
-                             ram_addr_t memory, ram_addr_t region_offset);
-static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
-                           ram_addr_t orig_memory, ram_addr_t region_offset);
+static int subpage_register (a_subpage *mmio, uint32_t start, uint32_t end,
+                             a_ram_addr memory, a_ram_addr region_offset);
+static void *subpage_init (a_target_phys_addr base, a_ram_addr *phys,
+                           a_ram_addr orig_memory, a_ram_addr region_offset);
 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                       need_subpage)                                     \
     do {                                                                \
@@ -2298,15 +2298,15 @@
    start_addr and region_offset are rounded down to a page boundary
    before calculating this offset.  This should not be a problem unless
    the low bits of start_addr and region_offset differ.  */
-void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
-                                         ram_addr_t size,
-                                         ram_addr_t phys_offset,
-                                         ram_addr_t region_offset)
+void cpu_register_physical_memory_offset(a_target_phys_addr start_addr,
+                                         a_ram_addr size,
+                                         a_ram_addr phys_offset,
+                                         a_ram_addr region_offset)
 {
-    target_phys_addr_t addr, end_addr;
+    a_target_phys_addr addr, end_addr;
     PhysPageDesc *p;
     CPUState *env;
-    ram_addr_t orig_size = size;
+    a_ram_addr orig_size = size;
     void *subpage;
 
     if (kvm_enabled())
@@ -2317,12 +2317,12 @@
     }
     region_offset &= TARGET_PAGE_MASK;
     size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
-    end_addr = start_addr + (target_phys_addr_t)size;
+    end_addr = start_addr + (a_target_phys_addr)size;
     for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
         p = phys_page_find(addr >> TARGET_PAGE_BITS);
         if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
-            ram_addr_t orig_memory = p->phys_offset;
-            target_phys_addr_t start_addr2, end_addr2;
+            a_ram_addr orig_memory = p->phys_offset;
+            a_target_phys_addr start_addr2, end_addr2;
             int need_subpage = 0;
 
             CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
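Note: the size adjustment above is the usual align-up idiom. A worked example with 4 KiB pages:

    /* TARGET_PAGE_SIZE == 0x1000, TARGET_PAGE_MASK == ~0xfff (assumed): */
    a_ram_addr size = 0x1801;
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;   /* -> 0x2000, two pages */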
@@ -2353,7 +2353,7 @@
                 (phys_offset & IO_MEM_ROMD)) {
                 phys_offset += TARGET_PAGE_SIZE;
             } else {
-                target_phys_addr_t start_addr2, end_addr2;
+                a_target_phys_addr start_addr2, end_addr2;
                 int need_subpage = 0;
 
                 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
@@ -2381,7 +2381,7 @@
 }
 
 /* XXX: temporary until new memory mapping API */
-ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
+a_ram_addr cpu_get_physical_page_desc(a_target_phys_addr addr)
 {
     PhysPageDesc *p;
 
@@ -2391,19 +2391,19 @@
     return p->phys_offset;
 }
 
-void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
+void qemu_register_coalesced_mmio(a_target_phys_addr addr, a_ram_addr size)
 {
     if (kvm_enabled())
         kvm_coalesce_mmio_region(addr, size);
 }
 
-void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
+void qemu_unregister_coalesced_mmio(a_target_phys_addr addr, a_ram_addr size)
 {
     if (kvm_enabled())
         kvm_uncoalesce_mmio_region(addr, size);
 }
 
-ram_addr_t qemu_ram_alloc(ram_addr_t size)
+a_ram_addr qemu_ram_alloc(a_ram_addr size)
 {
     RAMBlock *new_block;
 
@@ -2430,7 +2430,7 @@
     return new_block->offset;
 }
 
-void qemu_ram_free(ram_addr_t addr)
+void qemu_ram_free(a_ram_addr addr)
 {
     /* TODO: implement this.  */
 }
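Note: qemu_ram_alloc hands back a ram offset, not a host pointer; board code then wires it into the guest physical address space. A typical pattern from machine init code of this era (base address and size are example values):

    a_ram_addr ram_offset = qemu_ram_alloc(64 * 1024 * 1024);
    cpu_register_physical_memory(0x00000000, 64 * 1024 * 1024,
                                 ram_offset | IO_MEM_RAM);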
@@ -2443,7 +2443,7 @@
    It should not be used for general purpose DMA.
    Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
  */
-void *qemu_get_ram_ptr(ram_addr_t addr)
+void *qemu_get_ram_ptr(a_ram_addr addr)
 {
     RAMBlock *prev;
     RAMBlock **prevp;
@@ -2474,7 +2474,7 @@
 
 /* Some of the softmmu routines need to translate from a host pointer
    (typically a TLB entry) back to a ram offset.  */
-ram_addr_t qemu_ram_addr_from_host(void *ptr)
+a_ram_addr qemu_ram_addr_from_host(void *ptr)
 {
     RAMBlock *prev;
     RAMBlock **prevp;
@@ -2498,7 +2498,7 @@
     return block->offset + (host - block->host);
 }
 
-static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
+static uint32_t unassigned_mem_readb(void *opaque, a_target_phys_addr addr)
 {
 #ifdef DEBUG_UNASSIGNED
     printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
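Note: qemu_get_ram_ptr and qemu_ram_addr_from_host are inverses, as the return statement above shows. A sketch, assuming the offset stays within a single RAMBlock:

    void *host = qemu_get_ram_ptr(ram_offset);        /* ram offset -> host pointer */
    a_ram_addr back = qemu_ram_addr_from_host(host);  /* host pointer -> ram offset */
    /* back == ram_offset */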
@@ -2509,7 +2509,7 @@
     return 0;
 }
 
-static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
+static uint32_t unassigned_mem_readw(void *opaque, a_target_phys_addr addr)
 {
 #ifdef DEBUG_UNASSIGNED
     printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
@@ -2520,7 +2520,7 @@
     return 0;
 }
 
-static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
+static uint32_t unassigned_mem_readl(void *opaque, a_target_phys_addr addr)
 {
 #ifdef DEBUG_UNASSIGNED
     printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
@@ -2531,7 +2531,7 @@
     return 0;
 }
 
-static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
+static void unassigned_mem_writeb(void *opaque, a_target_phys_addr addr, uint32_t val)
 {
 #ifdef DEBUG_UNASSIGNED
     printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
@@ -2541,7 +2541,7 @@
 #endif
 }
 
-static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
+static void unassigned_mem_writew(void *opaque, a_target_phys_addr addr, uint32_t val)
 {
 #ifdef DEBUG_UNASSIGNED
     printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
@@ -2551,7 +2551,7 @@
 #endif
 }
 
-static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
+static void unassigned_mem_writel(void *opaque, a_target_phys_addr addr, uint32_t val)
 {
 #ifdef DEBUG_UNASSIGNED
     printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
@@ -2573,7 +2573,7 @@
     unassigned_mem_writel,
 };
 
-static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
+static void notdirty_mem_writeb(void *opaque, a_target_phys_addr ram_addr,
                                 uint32_t val)
 {
     int dirty_flags;
@@ -2593,7 +2593,7 @@
         tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
 }
 
-static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
+static void notdirty_mem_writew(void *opaque, a_target_phys_addr ram_addr,
                                 uint32_t val)
 {
     int dirty_flags;
@@ -2613,7 +2613,7 @@
         tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
 }
 
-static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
+static void notdirty_mem_writel(void *opaque, a_target_phys_addr ram_addr,
                                 uint32_t val)
 {
     int dirty_flags;
@@ -2693,39 +2693,39 @@
 /* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
    so these check for a hit then pass through to the normal out-of-line
    phys routines.  */
-static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
+static uint32_t watch_mem_readb(void *opaque, a_target_phys_addr addr)
 {
     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
     return ldub_phys(addr);
 }
 
-static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
+static uint32_t watch_mem_readw(void *opaque, a_target_phys_addr addr)
 {
     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
     return lduw_phys(addr);
 }
 
-static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
+static uint32_t watch_mem_readl(void *opaque, a_target_phys_addr addr)
 {
     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
     return ldl_phys(addr);
 }
 
-static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
+static void watch_mem_writeb(void *opaque, a_target_phys_addr addr,
                              uint32_t val)
 {
     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
     stb_phys(addr, val);
 }
 
-static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
+static void watch_mem_writew(void *opaque, a_target_phys_addr addr,
                              uint32_t val)
 {
     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
     stw_phys(addr, val);
 }
 
-static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
+static void watch_mem_writel(void *opaque, a_target_phys_addr addr,
                              uint32_t val)
 {
     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
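Note: the second argument to check_watchpoint is a length mask, ~(access_size - 1): ~0x0 for byte, ~0x1 for word, ~0x3 for long accesses. Conceptually the hit test masks the addresses to the access granularity before comparing (a sketch, not the real loop over the CPU's watchpoint list):

    /* sketch: a watchpoint hits when the access overlaps it at this granularity */
    int hit = ((addr & len_mask) == (wp->vaddr & len_mask));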
@@ -2744,7 +2744,7 @@
     watch_mem_writel,
 };
 
-static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
+static inline uint32_t subpage_readlen (a_subpage *mmio, a_target_phys_addr addr,
                                  unsigned int len)
 {
     uint32_t ret;
@@ -2761,7 +2761,7 @@
     return ret;
 }
 
-static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
+static inline void subpage_writelen (a_subpage *mmio, a_target_phys_addr addr,
                               uint32_t value, unsigned int len)
 {
     unsigned int idx;
@@ -2776,7 +2776,7 @@
                                   value);
 }
 
-static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
+static uint32_t subpage_readb (void *opaque, a_target_phys_addr addr)
 {
 #if defined(DEBUG_SUBPAGE)
     printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
@@ -2785,7 +2785,7 @@
     return subpage_readlen(opaque, addr, 0);
 }
 
-static void subpage_writeb (void *opaque, target_phys_addr_t addr,
+static void subpage_writeb (void *opaque, a_target_phys_addr addr,
                             uint32_t value)
 {
 #if defined(DEBUG_SUBPAGE)
@@ -2794,7 +2794,7 @@
     subpage_writelen(opaque, addr, value, 0);
 }
 
-static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
+static uint32_t subpage_readw (void *opaque, a_target_phys_addr addr)
 {
 #if defined(DEBUG_SUBPAGE)
     printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
@@ -2803,7 +2803,7 @@
     return subpage_readlen(opaque, addr, 1);
 }
 
-static void subpage_writew (void *opaque, target_phys_addr_t addr,
+static void subpage_writew (void *opaque, a_target_phys_addr addr,
                             uint32_t value)
 {
 #if defined(DEBUG_SUBPAGE)
@@ -2812,7 +2812,7 @@
     subpage_writelen(opaque, addr, value, 1);
 }
 
-static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
+static uint32_t subpage_readl (void *opaque, a_target_phys_addr addr)
 {
 #if defined(DEBUG_SUBPAGE)
     printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
@@ -2822,7 +2822,7 @@
 }
 
 static void subpage_writel (void *opaque,
-                         target_phys_addr_t addr, uint32_t value)
+                         a_target_phys_addr addr, uint32_t value)
 {
 #if defined(DEBUG_SUBPAGE)
     printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
@@ -2842,8 +2842,8 @@
     &subpage_writel,
 };
 
-static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
-                             ram_addr_t memory, ram_addr_t region_offset)
+static int subpage_register (a_subpage *mmio, uint32_t start, uint32_t end,
+                             a_ram_addr memory, a_ram_addr region_offset)
 {
     int idx, eidx;
     unsigned int i;
@@ -2875,13 +2875,13 @@
     return 0;
 }
 
-static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
-                           ram_addr_t orig_memory, ram_addr_t region_offset)
+static void *subpage_init (a_target_phys_addr base, a_ram_addr *phys,
+                           a_ram_addr orig_memory, a_ram_addr region_offset)
 {
-    subpage_t *mmio;
+    a_subpage *mmio;
     int subpage_memory;
 
-    mmio = qemu_mallocz(sizeof(subpage_t));
+    mmio = qemu_mallocz(sizeof(a_subpage));
 
     mmio->base = base;
     subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
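Note: the sizeof(subpage_t) change above is a reminder that this kind of sweep must touch expressions, not just signatures. For orientation, a simplified version of how cpu_register_physical_memory_offset (earlier in this diff) drives these two helpers:

    subpage = subpage_init(addr & TARGET_PAGE_MASK, &p->phys_offset,
                           orig_memory, region_offset);
    subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                     region_offset);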
@@ -2981,7 +2981,7 @@
 
 /* physical memory access (slow version, mainly for debug) */
 #if defined(CONFIG_USER_ONLY)
-void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
+void cpu_physical_memory_rw(a_target_phys_addr addr, uint8_t *buf,
                             int len, int is_write)
 {
     int l, flags;
@@ -3022,13 +3022,13 @@
 }
 
 #else
-void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
+void cpu_physical_memory_rw(a_target_phys_addr addr, uint8_t *buf,
                             int len, int is_write)
 {
     int l, io_index;
     uint8_t *ptr;
     uint32_t val;
-    target_phys_addr_t page;
+    a_target_phys_addr page;
     unsigned long pd;
     PhysPageDesc *p;
 
@@ -3046,7 +3046,7 @@
 
         if (is_write) {
             if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
-                target_phys_addr_t addr1 = addr;
+                a_target_phys_addr addr1 = addr;
                 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                 if (p)
                     addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
@@ -3085,7 +3085,7 @@
         } else {
             if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                 !(pd & IO_MEM_ROMD)) {
-                target_phys_addr_t addr1 = addr;
+                a_target_phys_addr addr1 = addr;
                 /* I/O case */
                 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                 if (p)
@@ -3120,12 +3120,12 @@
 }
 
 /* used for ROM loading : can write in RAM and ROM */
-void cpu_physical_memory_write_rom(target_phys_addr_t addr,
+void cpu_physical_memory_write_rom(a_target_phys_addr addr,
                                    const uint8_t *buf, int len)
 {
     int l;
     uint8_t *ptr;
-    target_phys_addr_t page;
+    a_target_phys_addr page;
     unsigned long pd;
     PhysPageDesc *p;
 
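Note: cpu_physical_memory_rw is the workhorse behind the cpu_physical_memory_read/write wrappers. A minimal device-side usage sketch (address and length are example values):

    uint8_t buf[4];
    cpu_physical_memory_read(0x1000, buf, sizeof(buf));    /* is_write == 0 */
    cpu_physical_memory_write(0x1000, buf, sizeof(buf));   /* is_write == 1 */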
@@ -3160,8 +3160,8 @@
 
 typedef struct {
     void *buffer;
-    target_phys_addr_t addr;
-    target_phys_addr_t len;
+    a_target_phys_addr addr;
+    a_target_phys_addr len;
 } BounceBuffer;
 
 static BounceBuffer bounce;
@@ -3211,16 +3211,16 @@
  * Use cpu_register_map_client() to know when retrying the map operation is
  * likely to succeed.
  */
-void *cpu_physical_memory_map(target_phys_addr_t addr,
-                              target_phys_addr_t *plen,
+void *cpu_physical_memory_map(a_target_phys_addr addr,
+                              a_target_phys_addr *plen,
                               int is_write)
 {
-    target_phys_addr_t len = *plen;
-    target_phys_addr_t done = 0;
+    a_target_phys_addr len = *plen;
+    a_target_phys_addr done = 0;
     int l;
     uint8_t *ret = NULL;
     uint8_t *ptr;
-    target_phys_addr_t page;
+    a_target_phys_addr page;
     unsigned long pd;
     PhysPageDesc *p;
     unsigned long addr1;
@@ -3270,12 +3270,12 @@
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
-void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
-                               int is_write, target_phys_addr_t access_len)
+void cpu_physical_memory_unmap(void *buffer, a_target_phys_addr len,
+                               int is_write, a_target_phys_addr access_len)
 {
     if (buffer != bounce.buffer) {
         if (is_write) {
-            ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
+            a_ram_addr addr1 = qemu_ram_addr_from_host(buffer);
             while (access_len) {
                 unsigned l;
                 l = TARGET_PAGE_SIZE;
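Note: taken together, map/unmap give the zero-copy DMA pattern; the caller must cope with *plen coming back shorter than requested (a bounce buffer may be substituted for MMIO regions). A sketch, with dma_addr, data, and dma_len as assumed caller state:

    a_target_phys_addr len = dma_len;
    void *ptr = cpu_physical_memory_map(dma_addr, &len, 1 /* is_write */);
    if (ptr) {
        memcpy(ptr, data, len);                        /* device fills guest RAM */
        cpu_physical_memory_unmap(ptr, len, 1, len);   /* marks the pages dirty */
    }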
@@ -3303,7 +3303,7 @@
 }
 
 /* warning: addr must be aligned */
-uint32_t ldl_phys(target_phys_addr_t addr)
+uint32_t ldl_phys(a_target_phys_addr addr)
 {
     int io_index;
     uint8_t *ptr;
@@ -3335,7 +3335,7 @@
 }
 
 /* warning: addr must be aligned */
-uint64_t ldq_phys(target_phys_addr_t addr)
+uint64_t ldq_phys(a_target_phys_addr addr)
 {
     int io_index;
     uint8_t *ptr;
@@ -3373,7 +3373,7 @@
 }
 
 /* XXX: optimize */
-uint32_t ldub_phys(target_phys_addr_t addr)
+uint32_t ldub_phys(a_target_phys_addr addr)
 {
     uint8_t val;
     cpu_physical_memory_read(addr, &val, 1);
@@ -3381,7 +3381,7 @@
 }
 
 /* XXX: optimize */
-uint32_t lduw_phys(target_phys_addr_t addr)
+uint32_t lduw_phys(a_target_phys_addr addr)
 {
     uint16_t val;
     cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
@@ -3391,7 +3391,7 @@
 /* warning: addr must be aligned. The ram page is not masked as dirty
    and the code inside is not invalidated. It is useful if the dirty
    bits are used to track modified PTEs */
-void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
+void stl_phys_notdirty(a_target_phys_addr addr, uint32_t val)
 {
     int io_index;
     uint8_t *ptr;
@@ -3427,7 +3427,7 @@
     }
 }
 
-void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
+void stq_phys_notdirty(a_target_phys_addr addr, uint64_t val)
 {
     int io_index;
     uint8_t *ptr;
@@ -3460,7 +3460,7 @@
 }
 
 /* warning: addr must be aligned */
-void stl_phys(target_phys_addr_t addr, uint32_t val)
+void stl_phys(a_target_phys_addr addr, uint32_t val)
 {
     int io_index;
     uint8_t *ptr;
@@ -3496,21 +3496,21 @@
 }
 
 /* XXX: optimize */
-void stb_phys(target_phys_addr_t addr, uint32_t val)
+void stb_phys(a_target_phys_addr addr, uint32_t val)
 {
     uint8_t v = val;
     cpu_physical_memory_write(addr, &v, 1);
 }
 
 /* XXX: optimize */
-void stw_phys(target_phys_addr_t addr, uint32_t val)
+void stw_phys(a_target_phys_addr addr, uint32_t val)
 {
     uint16_t v = tswap16(val);
     cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
 }
 
 /* XXX: optimize */
-void stq_phys(target_phys_addr_t addr, uint64_t val)
+void stq_phys(a_target_phys_addr addr, uint64_t val)
 {
     val = tswap64(val);
     cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
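Note: tswap16/tswap64 byte-swap only when host and target endianness differ (they are the identity otherwise), so the st*_phys helpers always store in target byte order. For example, on a little-endian host running a big-endian target:

    uint16_t v = tswap16(0x1234);   /* v == 0x3412 on such a cross-endian build */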
@@ -3523,7 +3523,7 @@
                         uint8_t *buf, int len, int is_write)
 {
     int l;
-    target_phys_addr_t phys_addr;
+    a_target_phys_addr phys_addr;
     target_ulong page;
 
     while (len > 0) {
