Revision f7c11b53 exec.c

--- a/exec.c
+++ b/exec.c
@@ -2030,7 +2030,7 @@
 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                     target_ulong vaddr)
 {
-    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
+    cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
 }
 
 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
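The first hunk swaps an open-coded OR into the phys_ram_dirty byte map for a named helper. The helper definitions themselves are not part of this file's diff (they come from the companion header change, which is not shown here); the sketch below, reconstructed purely from the accesses removed in this revision, shows what the setter and its getter counterpart presumably boil down to:

/* Sketch only, reconstructed from the removed open-coded accesses; the
 * committed definitions live in the companion header change, not shown in
 * this diff. phys_ram_dirty is the existing one-byte-per-page dirty map. */
static inline int cpu_physical_memory_get_dirty_flags(ram_addr_t addr)
{
    return phys_ram_dirty[addr >> TARGET_PAGE_BITS];
}

static inline void cpu_physical_memory_set_dirty_flags(ram_addr_t addr,
                                                        int dirty_flags)
{
    /* OR rather than assign: callers such as tlb_unprotect_code_phys pass
     * a single flag and expect the other flags to survive. */
    phys_ram_dirty[addr >> TARGET_PAGE_BITS] |= dirty_flags;
}

Read against these two bodies, the later hunks are a mechanical substitution of the access pattern rather than a behavioural change.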
@@ -2051,8 +2051,7 @@
 {
     CPUState *env;
     unsigned long length, start1;
-    int i, mask, len;
-    uint8_t *p;
+    int i;
 
     start &= TARGET_PAGE_MASK;
     end = TARGET_PAGE_ALIGN(end);
@@ -2060,11 +2059,7 @@
     length = end - start;
     if (length == 0)
         return;
-    len = length >> TARGET_PAGE_BITS;
-    mask = ~dirty_flags;
-    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
-    for(i = 0; i < len; i++)
-        p[i] &= mask;
+    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
 
     /* we modify the TLB cache so that the dirty bit will be set again
        when accessing the range */
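Note that only mask, len and p disappear from the declarations; i stays because the rest of cpu_physical_memory_reset_dirty (the TLB walk further down) still uses it. The five removed lines were the entire masking loop, so the new helper presumably just packages that loop; a minimal sketch under that assumption:

/* Sketch only, mirroring the loop removed above; the committed helper is
 * defined outside this diff. Clears the dirty_flags bits for every page
 * in [start, start + length). */
static inline void cpu_physical_memory_mask_dirty_range(ram_addr_t start,
                                                         int length,
                                                         int dirty_flags)
{
    int i, mask, len;
    uint8_t *p;

    len = length >> TARGET_PAGE_BITS;
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for (i = 0; i < len; i++) {
        p[i] &= mask;
    }
}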
@@ -2986,16 +2981,16 @@
                                 uint32_t val)
 {
     int dirty_flags;
-    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
+    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
     if (!(dirty_flags & CODE_DIRTY_FLAG)) {
 #if !defined(CONFIG_USER_ONLY)
         tb_invalidate_phys_page_fast(ram_addr, 1);
-        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
+        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
 #endif
     }
     stb_p(qemu_get_ram_ptr(ram_addr), val);
     dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
-    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
+    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
     /* we remove the notdirty callback only if the code has been
        flushed */
     if (dirty_flags == 0xff)
@@ -3006,16 +3001,16 @@
                                 uint32_t val)
 {
     int dirty_flags;
-    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
+    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
     if (!(dirty_flags & CODE_DIRTY_FLAG)) {
 #if !defined(CONFIG_USER_ONLY)
         tb_invalidate_phys_page_fast(ram_addr, 2);
-        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
+        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
 #endif
     }
     stw_p(qemu_get_ram_ptr(ram_addr), val);
     dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
-    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
+    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
     /* we remove the notdirty callback only if the code has been
        flushed */
     if (dirty_flags == 0xff)
@@ -3026,16 +3021,16 @@
                                 uint32_t val)
 {
     int dirty_flags;
-    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
+    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
     if (!(dirty_flags & CODE_DIRTY_FLAG)) {
 #if !defined(CONFIG_USER_ONLY)
         tb_invalidate_phys_page_fast(ram_addr, 4);
-        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
+        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
 #endif
     }
     stl_p(qemu_get_ram_ptr(ram_addr), val);
     dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
-    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
+    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
     /* we remove the notdirty callback only if the code has been
        flushed */
     if (dirty_flags == 0xff)
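The three notdirty handlers above differ only in the access size passed to tb_invalidate_phys_page_fast and in the store helper (stb_p, stw_p, stl_p); each is converted the same way: read the flags through the getter, invalidate translated code if needed, perform the store, then push the flags back through the setter. The removed code wrote the flag byte with a plain assignment while the setter presumably ORs flags in, but since dirty_flags was just read back from that same byte the result is identical. A generic sketch of the shared pattern (the file keeps three separate handlers; notdirty_mem_write_common, size and store_p are illustrative stand-ins, not names from the commit):

/* Illustration only, not code from the commit: the common shape of
 * notdirty_mem_writeb/w/l after the conversion. "size" is 1, 2 or 4 and
 * "store_p" stands in for stb_p/stw_p/stl_p. */
static void notdirty_mem_write_common(ram_addr_t ram_addr, uint32_t val,
                                      int size,
                                      void (*store_p)(void *ptr, uint32_t v))
{
    int dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);

    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        /* The write lands on a page with translated code: drop the affected
           TBs, then re-read the flags the invalidation may have updated. */
        tb_invalidate_phys_page_fast(ram_addr, size);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    store_p(qemu_get_ram_ptr(ram_addr), val);
    /* Mark the page dirty for every tracker except the code one. */
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* The real handlers then drop the notdirty callback once
       dirty_flags == 0xff, exactly as before this commit. */
}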
@@ -3486,8 +3481,8 @@
                     /* invalidate code */
                     tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                     /* set dirty bit */
-                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
-                        (0xff & ~CODE_DIRTY_FLAG);
+                    cpu_physical_memory_set_dirty_flags(
+                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                 }
             }
         } else {
@@ -3693,8 +3688,8 @@
                     /* invalidate code */
                     tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                     /* set dirty bit */
-                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
-                        (0xff & ~CODE_DIRTY_FLAG);
+                    cpu_physical_memory_set_dirty_flags(
+                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                 }
                 addr1 += l;
                 access_len -= l;
@@ -3828,8 +3823,8 @@
                 /* invalidate code */
                 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                 /* set dirty bit */
-                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
-                    (0xff & ~CODE_DIRTY_FLAG);
+                cpu_physical_memory_set_dirty_flags(
+                    addr1, (0xff & ~CODE_DIRTY_FLAG));
             }
         }
     }
@@ -3897,8 +3892,8 @@
             /* invalidate code */
             tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
             /* set dirty bit */
-            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
-                (0xff & ~CODE_DIRTY_FLAG);
+            cpu_physical_memory_set_dirty_flags(addr1,
+                (0xff & ~CODE_DIRTY_FLAG));
         }
     }
 }
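The remaining hunks are the same substitution repeated in the DMA-style write paths (the write branch of cpu_physical_memory_rw, the unmap path that walks access_len, and the two 32-bit store helpers at the end of the file, both of which invalidate a 4-byte range): after tb_invalidate_phys_page_range has dealt with any translated code, the page is flagged dirty for every tracker except the code one. The mask reads as "all dirty flags minus CODE_DIRTY_FLAG"; the flag values below are quoted from memory of the cpu-all.h of this period and should be checked against the actual tree:

/* Assumed flag layout, one byte of flags per guest page in phys_ram_dirty
 * (values are an assumption, verify against cpu-all.h): */
#define VGA_DIRTY_FLAG       0x01
#define CODE_DIRTY_FLAG      0x02
#define MIGRATION_DIRTY_FLAG 0x08

/* Hence (0xff & ~CODE_DIRTY_FLAG) == 0xfd: raise every dirty flag except
 * CODE_DIRTY_FLAG, which the TB invalidation above already accounts for. */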
