Revision 5fafdf24 exec.c

--- a/exec.c
+++ b/exec.c
@@ -1 +1 @@
 /*
  *  virtual page mapping and translated block handling
- * 
+ *
  *  Copyright (c) 2003 Fabrice Bellard
  *
  * This library is free software; you can redistribute it and/or
@@ -44 +44 @@
 //#define DEBUG_UNASSIGNED
 
 /* make various TB consistency checks */
-//#define DEBUG_TB_CHECK 
-//#define DEBUG_TLB_CHECK 
+//#define DEBUG_TB_CHECK
+//#define DEBUG_TLB_CHECK
 
 //#define DEBUG_IOPORT
 //#define DEBUG_SUBPAGE
@@ -95 +95 @@
 CPUState *first_cpu;
 /* current CPU in the current thread. It is only valid inside
    cpu_exec() */
-CPUState *cpu_single_env; 
+CPUState *cpu_single_env;
 
 typedef struct PageDesc {
     /* list of TBs intersecting this ram page */
@@ -175 +175 @@
     {
         SYSTEM_INFO system_info;
         DWORD old_protect;
-        
+
         GetSystemInfo(&system_info);
         qemu_real_host_page_size = system_info.dwPageSize;
-        
+
         VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                        PAGE_EXECUTE_READWRITE, &old_protect);
     }
@@ -189 +189 @@
 
         start = (unsigned long)code_gen_buffer;
         start &= ~(qemu_real_host_page_size - 1);
-        
+
         end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
         end += qemu_real_host_page_size - 1;
         end &= ~(qemu_real_host_page_size - 1);
-        
-        mprotect((void *)start, end - start, 
+
+        mprotect((void *)start, end - start,
                  PROT_READ | PROT_WRITE | PROT_EXEC);
     }
 #endif
@@ -280 +280 @@
 
 #if !defined(CONFIG_USER_ONLY)
 static void tlb_protect_code(ram_addr_t ram_addr);
-static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr, 
+static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                     target_ulong vaddr);
 #endif
 
@@ -339 +339 @@
 {
     CPUState *env;
 #if defined(DEBUG_FLUSH)
-    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n", 
-           code_gen_ptr - code_gen_buffer, 
-           nb_tbs, 
+    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
+           code_gen_ptr - code_gen_buffer,
+           nb_tbs,
            nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
 #endif
     nb_tbs = 0;
-    
+
     for(env = first_cpu; env != NULL; env = env->next_cpu) {
         memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
     }
@@ -382 +382 @@
 {
     TranslationBlock *tb;
     int i, flags1, flags2;
-    
+
     for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
         for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
             flags1 = page_get_flags(tb->pc);
@@ -491 +491 @@
     unsigned int h, n1;
     target_ulong phys_pc;
     TranslationBlock *tb1, *tb2;
-    
+
     /* remove the TB from the hash list */
     phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
     h = tb_phys_hash_func(phys_pc);
-    tb_remove(&tb_phys_hash[h], tb, 
+    tb_remove(&tb_phys_hash[h], tb,
               offsetof(TranslationBlock, phys_hash_next));
 
     /* remove the TB from the page list */
@@ -571 +571 @@
 {
     int n, tb_start, tb_end;
     TranslationBlock *tb;
-    
+
     p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
     if (!p->code_bitmap)
         return;
@@ -600 +600 @@
 
 #ifdef TARGET_HAS_PRECISE_SMC
 
-static void tb_gen_code(CPUState *env, 
+static void tb_gen_code(CPUState *env,
                         target_ulong pc, target_ulong cs_base, int flags,
                         int cflags)
 {
@@ -624 +624 @@
     tb->cflags = cflags;
     cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
     code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
-    
+
     /* check next page if needed */
     virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
     phys_page2 = -1;
@@ -634 +634 @@
     tb_link_phys(tb, phys_pc, phys_page2);
 }
 #endif
-    
+
 /* invalidate all TBs which intersect with the target physical page
    starting in range [start;end[. NOTE: start and end must refer to
    the same physical page. 'is_cpu_write_access' should be true if called
    from a real cpu write access: the virtual CPU will exit the current
    TB if code is modified inside this TB. */
-void tb_invalidate_phys_page_range(target_ulong start, target_ulong end, 
+void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                    int is_cpu_write_access)
 {
     int n, current_tb_modified, current_tb_not_found, current_flags;
@@ -651 +651 @@
     target_ulong current_pc, current_cs_base;
 
     p = page_find(start >> TARGET_PAGE_BITS);
-    if (!p) 
+    if (!p)
         return;
-    if (!p->code_bitmap && 
+    if (!p->code_bitmap &&
         ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
         is_cpu_write_access) {
         /* build code bitmap */
@@ -700 +700 @@
                 that the modification is after the current PC, but it
                 would require a specialized function to partially
                 restore the CPU state */
-                
+
                 current_tb_modified = 1;
-                cpu_restore_state(current_tb, env, 
+                cpu_restore_state(current_tb, env,
                                   env->mem_write_pc, NULL);
 #if defined(TARGET_I386)
                 current_flags = env->hflags;
@@ -745 +745 @@
            modifying the memory. It will ensure that it cannot modify
            itself */
         env->current_tb = NULL;
-        tb_gen_code(env, current_pc, current_cs_base, current_flags, 
+        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                     CF_SINGLE_INSN);
         cpu_resume_from_signal(env, NULL);
     }
@@ -760 +760 @@
 #if 0
     if (1) {
         if (loglevel) {
-            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n", 
-                   cpu_single_env->mem_write_vaddr, len, 
-                   cpu_single_env->eip, 
+            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
+                   cpu_single_env->mem_write_vaddr, len,
+                   cpu_single_env->eip,
                    cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
         }
     }
 #endif
     p = page_find(start >> TARGET_PAGE_BITS);
-    if (!p) 
+    if (!p)
         return;
     if (p->code_bitmap) {
         offset = start & ~TARGET_PAGE_MASK;
@@ -782 +782 @@
 }
 
 #if !defined(CONFIG_SOFTMMU)
-static void tb_invalidate_phys_page(target_ulong addr, 
+static void tb_invalidate_phys_page(target_ulong addr,
                                     unsigned long pc, void *puc)
 {
     int n, current_flags, current_tb_modified;
@@ -795 +795 @@
 
     addr &= TARGET_PAGE_MASK;
     p = page_find(addr >> TARGET_PAGE_BITS);
-    if (!p) 
+    if (!p)
         return;
     tb = p->first_tb;
     current_tb_modified = 0;
@@ -819 +819 @@
                    that the modification is after the current PC, but it
                    would require a specialized function to partially
                    restore the CPU state */
-            
+
             current_tb_modified = 1;
             cpu_restore_state(current_tb, env, pc, puc);
 #if defined(TARGET_I386)
@@ -842 +842 @@
            modifying the memory. It will ensure that it cannot modify
            itself */
         env->current_tb = NULL;
-        tb_gen_code(env, current_pc, current_cs_base, current_flags, 
+        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                     CF_SINGLE_INSN);
         cpu_resume_from_signal(env, puc);
     }
@@ -851 +851 @@
 #endif
 
 /* add the tb in the target page and protect it if necessary */
-static inline void tb_alloc_page(TranslationBlock *tb, 
+static inline void tb_alloc_page(TranslationBlock *tb,
                                  unsigned int n, target_ulong page_addr)
 {
     PageDesc *p;
@@ -886 +886 @@
             p2->flags &= ~PAGE_WRITE;
             page_get_flags(addr);
           }
-        mprotect(g2h(page_addr), qemu_host_page_size, 
+        mprotect(g2h(page_addr), qemu_host_page_size,
                  (prot & PAGE_BITS) & ~PAGE_WRITE);
 #ifdef DEBUG_TB_INVALIDATE
-        printf("protecting code page: 0x%08lx\n", 
+        printf("protecting code page: 0x%08lx\n",
                page_addr);
 #endif
     }
@@ -911 +911 @@
 {
     TranslationBlock *tb;
 
-    if (nb_tbs >= CODE_GEN_MAX_BLOCKS || 
+    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
         (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
         return NULL;
     tb = &tbs[nb_tbs++];
@@ -922 +922 @@
 
 /* add a new TB and link it to the physical page tables. phys_page2 is
    (-1) to indicate that only one page contains the TB. */
-void tb_link_phys(TranslationBlock *tb, 
+void tb_link_phys(TranslationBlock *tb,
                   target_ulong phys_pc, target_ulong phys_page2)
 {
     unsigned int h;
@@ -988 +988 @@
         } else {
             m_min = m + 1;
         }
-    } 
+    }
     return &tbs[m_max];
 }
 
@@ -1024 +1024 @@
         }
         *ptb = tb->jmp_next[n];
         tb->jmp_next[n] = NULL;
-        
+
         /* suppress the jump to next tb in generated code */
         tb_reset_jump(tb, n);
 
@@ -1103 +1103 @@
 {
 #if defined(TARGET_HAS_ICE)
     int i;
-    
+
     for(i = 0; i < env->nb_breakpoints; i++) {
         if (env->breakpoints[i] == pc)
             return 0;
@@ -1112 +1112 @@
     if (env->nb_breakpoints >= MAX_BREAKPOINTS)
         return -1;
     env->breakpoints[env->nb_breakpoints++] = pc;
-    
+
     breakpoint_invalidate(env, pc);
     return 0;
 #else
@@ -1216 +1216 @@
 }
 
 CPULogItem cpu_log_items[] = {
-    { CPU_LOG_TB_OUT_ASM, "out_asm", 
+    { CPU_LOG_TB_OUT_ASM, "out_asm",
       "show generated host assembly code for each compiled TB" },
     { CPU_LOG_TB_IN_ASM, "in_asm",
       "show target assembly code for each compiled TB" },
-    { CPU_LOG_TB_OP, "op", 
+    { CPU_LOG_TB_OP, "op",
       "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
 #ifdef TARGET_I386
     { CPU_LOG_TB_OP_OPT, "op_opt",
@@ -1249 +1249 @@
         return 0;
     return memcmp(s1, s2, n) == 0;
 }
-      
+
 /* takes a comma separated list of log masks. Return 0 if error. */
 int cpu_str_to_log_mask(const char *str)
 {
@@ -1365 +1365 @@
 
 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
 {
-    if (addr == (tlb_entry->addr_read & 
+    if (addr == (tlb_entry->addr_read &
                  (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
-        addr == (tlb_entry->addr_write & 
+        addr == (tlb_entry->addr_write &
                  (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
-        addr == (tlb_entry->addr_code & 
+        addr == (tlb_entry->addr_code &
                  (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
         tlb_entry->addr_read = -1;
         tlb_entry->addr_write = -1;
@@ -1423 +1423 @@
    can be detected */
 static void tlb_protect_code(ram_addr_t ram_addr)
 {
-    cpu_physical_memory_reset_dirty(ram_addr, 
+    cpu_physical_memory_reset_dirty(ram_addr,
                                     ram_addr + TARGET_PAGE_SIZE,
                                     CODE_DIRTY_FLAG);
 }
 
 /* update the TLB so that writes in physical page 'phys_addr' are no longer
    tested for self modifying code */
-static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr, 
+static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                     target_ulong vaddr)
 {
     phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
 }
 
-static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, 
+static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                          unsigned long start, unsigned long length)
 {
     unsigned long addr;
@@ -1514 +1514 @@
                         p->phys_addr >= start && p->phys_addr < end &&
                         (p->prot & PROT_WRITE)) {
                         if (addr < MMAP_AREA_END) {
-                            mprotect((void *)addr, TARGET_PAGE_SIZE, 
+                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                      p->prot & ~PROT_WRITE);
                         }
                     }
@@ -1532 +1532 @@
     ram_addr_t ram_addr;
 
     if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
-        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + 
+        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
             tlb_entry->addend - (unsigned long)phys_ram_base;
         if (!cpu_physical_memory_is_dirty(ram_addr)) {
             tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
@@ -1558 +1558 @@
 #endif
 }
 
-static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, 
+static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                   unsigned long start)
 {
     unsigned long addr;
@@ -1593 +1593 @@
    is permitted. Return 0 if OK or 2 if the page could not be mapped
    (can only happen in non SOFTMMU mode for I/O pages or pages
    conflicting with the host address space). */
-int tlb_set_page_exec(CPUState *env, target_ulong vaddr, 
-                      target_phys_addr_t paddr, int prot, 
+int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
+                      target_phys_addr_t paddr, int prot,
                       int is_user, int is_softmmu)
 {
     PhysPageDesc *p;
@@ -1619 +1619 @@
 
     ret = 0;
 #if !defined(CONFIG_SOFTMMU)
-    if (is_softmmu) 
+    if (is_softmmu)
 #endif
     {
         if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
@@ -1664 +1664 @@
             te->addr_code = -1;
         }
         if (prot & PAGE_WRITE) {
-            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM || 
+            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
                 (pd & IO_MEM_ROMD)) {
                 /* write access calls the I/O callback */
-                te->addr_write = vaddr | 
+                te->addr_write = vaddr |
                     (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
-            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM && 
+            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                        !cpu_physical_memory_is_dirty(pd)) {
                 te->addr_write = vaddr | IO_MEM_NOTDIRTY;
             } else {
@@ -1693 +1693 @@
                 ret = 2;
             } else {
                 if (prot & PROT_WRITE) {
-                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM || 
+                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
 #if defined(TARGET_HAS_SMC) || 1
                         first_tb ||
 #endif
-                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM && 
+                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                          !cpu_physical_memory_is_dirty(pd))) {
                         /* ROM: we do as if code was inside */
                         /* if code is present, we only map as read only and save the
                            original mapping */
                         VirtPageDesc *vp;
-                        
+
                         vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
                         vp->phys_addr = pd;
                         vp->prot = prot;
@@ -1711 +1711 @@
                         prot &= ~PAGE_WRITE;
                     }
                 }
-                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot, 
+                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                 if (map_addr == MAP_FAILED) {
                     cpu_abort(env, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
@@ -1749 +1749 @@
     if (!(vp->prot & PAGE_WRITE))
         return 0;
 #if defined(DEBUG_TLB)
-    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n", 
+    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
            addr, vp->phys_addr, vp->prot);
 #endif
     if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
@@ -1775 +1775 @@
 {
 }
 
-int tlb_set_page_exec(CPUState *env, target_ulong vaddr, 
-                      target_phys_addr_t paddr, int prot, 
+int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
+                      target_phys_addr_t paddr, int prot,
                       int is_user, int is_softmmu)
 {
     return 0;
@@ -1808 +1808 @@
                 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                 if (start != -1) {
                     fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
-                            start, end, end - start, 
+                            start, end, end - start,
                             prot & PAGE_READ ? 'r' : '-',
                             prot & PAGE_WRITE ? 'w' : '-',
                             prot & PAGE_EXEC ? 'x' : '-');
@@ -1852 +1852 @@
         p = page_find_alloc(addr >> TARGET_PAGE_BITS);
         /* if the write protection is set, then we invalidate the code
            inside */
-        if (!(p->flags & PAGE_WRITE) && 
+        if (!(p->flags & PAGE_WRITE) &&
             (flags & PAGE_WRITE) &&
             p->first_tb) {
             tb_invalidate_phys_page(addr, 0, NULL);
@@ -1887 +1887 @@
     if (prot & PAGE_WRITE_ORG) {
         pindex = (address - host_start) >> TARGET_PAGE_BITS;
         if (!(p1[pindex].flags & PAGE_WRITE)) {
-            mprotect((void *)g2h(host_start), qemu_host_page_size, 
+            mprotect((void *)g2h(host_start), qemu_host_page_size,
                      (prot & PAGE_BITS) | PAGE_WRITE);
             p1[pindex].flags |= PAGE_WRITE;
             /* and since the content will be modified, we must invalidate
@@ -1950 +1950 @@
 /* register physical memory. 'size' must be a multiple of the target
    page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
    io memory page */
-void cpu_register_physical_memory(target_phys_addr_t start_addr, 
+void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                   unsigned long size,
                                   unsigned long phys_offset)
 {
@@ -2008 +2008 @@
             }
         }
     }
-    
+
     /* since each CPU stores ram addresses in its TLB cache, we must
        reset the modified entries */
     /* XXX: slow ! */
@@ -2033 +2033 @@
 {
     ram_addr_t addr;
     if ((phys_ram_alloc_offset + size) >= phys_ram_size) {
-        fprintf(stderr, "Not enough memory (requested_size = %u, max memory = %d)\n", 
+        fprintf(stderr, "Not enough memory (requested_size = %u, max memory = %d)\n",
                 size, phys_ram_size);
         abort();
     }
@@ -2448 +2448 @@
 
 /* physical memory access (slow version, mainly for debug) */
 #if defined(CONFIG_USER_ONLY)
-void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf, 
+void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                             int len, int is_write)
 {
     int l, flags;
@@ -2483 +2483 @@
 }
 
 #else
-void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf, 
+void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                             int len, int is_write)
 {
     int l, io_index;
@@ -2492 +2492 @@
     target_phys_addr_t page;
     unsigned long pd;
     PhysPageDesc *p;
-    
+
     while (len > 0) {
         page = addr & TARGET_PAGE_MASK;
         l = (page + TARGET_PAGE_SIZE) - addr;
@@ -2504 +2504 @@
         } else {
             pd = p->phys_offset;
         }
-        
+
         if (is_write) {
             if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
@@ -2536 +2536 @@
                     /* invalidate code */
                     tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                     /* set dirty bit */
-                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |= 
+                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                         (0xff & ~CODE_DIRTY_FLAG);
                 }
             }
         } else {
-            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && 
+            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                 !(pd & IO_MEM_ROMD)) {
                 /* I/O case */
                 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
@@ -2563 +2563 @@
                 }
             } else {
                 /* RAM case */
-                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) + 
+                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                     (addr & ~TARGET_PAGE_MASK);
                 memcpy(buf, ptr, l);
             }
@@ -2575 +2575 @@
 }
 
 /* used for ROM loading : can write in RAM and ROM */
-void cpu_physical_memory_write_rom(target_phys_addr_t addr, 
+void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                    const uint8_t *buf, int len)
 {
     int l;
@@ -2583 +2583 @@
     target_phys_addr_t page;
     unsigned long pd;
     PhysPageDesc *p;
-    
+
     while (len > 0) {
         page = addr & TARGET_PAGE_MASK;
         l = (page + TARGET_PAGE_SIZE) - addr;
@@ -2595 +2595 @@
         } else {
             pd = p->phys_offset;
         }
-        
+
         if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
             (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
             !(pd & IO_MEM_ROMD)) {
@@ -2629 +2629 @@
     } else {
         pd = p->phys_offset;
     }
-        
-    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && 
+
+    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
         !(pd & IO_MEM_ROMD)) {
         /* I/O case */
         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
         val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
     } else {
         /* RAM case */
-        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) + 
+        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
             (addr & ~TARGET_PAGE_MASK);
         val = ldl_p(ptr);
     }
@@ -2659 +2659 @@
     } else {
         pd = p->phys_offset;
     }
-        
+
     if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
         !(pd & IO_MEM_ROMD)) {
         /* I/O case */
@@ -2673 +2673 @@
 #endif
     } else {
         /* RAM case */
-        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) + 
+        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
             (addr & ~TARGET_PAGE_MASK);
         val = ldq_p(ptr);
     }
@@ -2712 +2712 @@
     } else {
         pd = p->phys_offset;
     }
-        
+
     if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
         io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
     } else {
-        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) + 
+        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
             (addr & ~TARGET_PAGE_MASK);
         stl_p(ptr, val);
     }
@@ -2736 +2736 @@
     } else {
         pd = p->phys_offset;
     }
-        
+
     if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
 #ifdef TARGET_WORDS_BIGENDIAN
@@ -2747 +2747 @@
         io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
 #endif
     } else {
-        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) + 
+        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
             (addr & ~TARGET_PAGE_MASK);
         stq_p(ptr, val);
     }
@@ -2767 +2767 @@
     } else {
         pd = p->phys_offset;
     }
-        
+
     if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
         io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
@@ -2811 +2811 @@
 #endif
 
 /* virtual memory access for debug */
-int cpu_memory_rw_debug(CPUState *env, target_ulong addr, 
+int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                         uint8_t *buf, int len, int is_write)
 {
     int l;
@@ -2827 +2827 @@
         l = (page + TARGET_PAGE_SIZE) - addr;
         if (l > len)
             l = len;
-        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK), 
+        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                                buf, l, is_write);
         len -= l;
         buf += l;
@@ -2842 +2842 @@
     int i, target_code_size, max_target_code_size;
     int direct_jmp_count, direct_jmp2_count, cross_page;
     TranslationBlock *tb;
-    
+
     target_code_size = 0;
     max_target_code_size = 0;
     cross_page = 0;
@@ -2864 +2864 @@
     }
     /* XXX: avoid using doubles ? */
     cpu_fprintf(f, "TB count            %d\n", nb_tbs);
-    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n", 
+    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                 nb_tbs ? target_code_size / nb_tbs : 0,
                 max_target_code_size);
-    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n", 
+    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
-    cpu_fprintf(f, "cross page TB count %d (%d%%)\n", 
-            cross_page, 
+    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
+            cross_page,
             nb_tbs ? (cross_page * 100) / nb_tbs : 0);
     cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
-                direct_jmp_count, 
+                direct_jmp_count,
                 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                 direct_jmp2_count,
                 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
@@ -2883 +2883 @@
     cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
 }
 
-#if !defined(CONFIG_USER_ONLY) 
+#if !defined(CONFIG_USER_ONLY)
 
 #define MMUSUFFIX _cmmu
 #define GETPC() NULL

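Every hunk in this revision is a whitespace-only cleanup: trailing blanks are stripped and the code is otherwise unchanged. One of the touched spots, the mprotect() call around lines 190-198, shows a pattern worth a standalone illustration: a statically allocated code buffer is rounded out to host page boundaries before its protection is changed to read/write/execute. The sketch below is not part of this revision; it assumes a POSIX host (sysconf, mprotect) and uses illustrative names.

    #include <stdint.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    static uint8_t code_buffer[64 * 1024];   /* stand-in for code_gen_buffer */

    int make_buffer_executable(void)
    {
        unsigned long page_size = (unsigned long)sysconf(_SC_PAGESIZE);
        unsigned long start = (unsigned long)code_buffer;
        unsigned long end = start + sizeof(code_buffer);

        start &= ~(page_size - 1);                         /* round start down to a page boundary */
        end = (end + page_size - 1) & ~(page_size - 1);    /* round end up to a page boundary */

        /* mprotect() operates on whole, page-aligned ranges, hence the rounding above */
        if (mprotect((void *)start, end - start,
                     PROT_READ | PROT_WRITE | PROT_EXEC) != 0) {
            perror("mprotect");
            return -1;
        }
        return 0;
    }

The rounding matters because mprotect() requires a page-aligned start address; exec.c performs the same arithmetic with qemu_real_host_page_size instead of sysconf().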