Revision 461c0471

--- a/target-i386/cpu.h
+++ b/target-i386/cpu.h
@@ -389,6 +389,9 @@
 extern int phys_ram_size;
 extern int phys_ram_fd;
 extern uint8_t *phys_ram_base;
+extern int a20_enabled;
+
+void cpu_x86_set_a20(CPUX86State *env, int a20_state);
 
 /* used to debug */
 #define X86_DUMP_FPU  0x0001 /* dump FPU state too */
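
Note: the A20 line is physical address bit 20. For 8086 compatibility, PC
chipsets can gate it off so that addresses wrap at 1 MB, and guest software
re-enables it (classically through the keyboard controller, or through
System Control Port A at I/O port 0x92). This hunk only exports the state
flag and the setter; a hypothetical sketch of a caller (the actual I/O
handler is not part of this diff) could look like:

    /* hypothetical sketch: bit 1 of port 0x92 controls the A20 gate */
    static void port92_write(CPUX86State *env, uint32_t val)
    {
        cpu_x86_set_a20(env, (val >> 1) & 1);
    }
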
--- a/target-i386/helper2.c
+++ b/target-i386/helper2.c
@@ -158,10 +158,29 @@
 /* called when cr3 or PG bit are modified */
 static int last_pg_state = -1;
 static int last_pe_state = 0;
+static uint32_t a20_mask;
+int a20_enabled;
+
 int phys_ram_size;
 int phys_ram_fd;
 uint8_t *phys_ram_base;
 
+void cpu_x86_set_a20(CPUX86State *env, int a20_state)
+{
+    a20_state = (a20_state != 0);
+    if (a20_state != a20_enabled) {
+        /* when a20 is changed, all the MMU mappings are invalid, so
+           we must flush everything */
+        page_unmap();
+        tlb_flush(env);
+        a20_enabled = a20_state;
+        if (a20_enabled)
+            a20_mask = 0xffffffff;
+        else
+            a20_mask = 0xffefffff;
+    }
+}
+
 void cpu_x86_update_cr0(CPUX86State *env)
 {
     int pg_state, pe_state;
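
Note: 0xffefffff is ~(1 << 20), i.e. every bit set except bit 20, so with
the gate closed ANDing a physical address with a20_mask forces the A20 line
to zero and reproduces the real-mode wrap-around at 1 MB:

    /* 1 MB aliases back to 0 when A20 is disabled */
    assert((0x00100000 & 0xffefffff) == 0x00000000);
    assert((0x0010ffef & 0xffefffff) == 0x0000ffef);

The page_unmap()/tlb_flush() pair is required because every cached mapping
and TLB entry was computed under the previous mask.
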
@@ -195,6 +214,9 @@
 
 void cpu_x86_init_mmu(CPUX86State *env)
 {
+    a20_enabled = 1;
+    a20_mask = 0xffffffff;
+
     last_pg_state = -1;
     cpu_x86_update_cr0(env);
 }
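
Note: cpu_x86_init_mmu() brings the emulated CPU up with the gate open
(a20_enabled = 1, a20_mask = 0xffffffff), so the full 32-bit physical
address space is visible until the guest explicitly disables A20.
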
@@ -244,14 +266,15 @@
 
     if (!(env->cr[0] & CR0_PG_MASK)) {
         pte = addr;
-        virt_addr = addr & ~0xfff;
+        virt_addr = addr & TARGET_PAGE_MASK;
         prot = PROT_READ | PROT_WRITE;
         page_size = 4096;
         goto do_mapping;
     }
 
     /* page directory entry */
-    pde_ptr = phys_ram_base + ((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3));
+    pde_ptr = phys_ram_base + 
+        (((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3)) & a20_mask);
     pde = ldl_raw(pde_ptr);
     if (!(pde & PG_PRESENT_MASK)) {
         error_code = 0;
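
Note: in the two-level 32-bit page walk, bits 31..22 of the linear address
select one of 1024 four-byte directory entries; (addr >> 20) & ~3 computes
that byte offset in a single shift. The new & a20_mask applies the A20 gate
to the table walk itself, not just to the final translation. An equivalent,
more explicit form of the offset:

    /* PDE byte offset: directory index (bits 31..22) times 4 */
    static inline uint32_t pde_offset(uint32_t addr)
    {
        return ((addr >> 22) & 0x3ff) << 2;   /* == (addr >> 20) & ~3 */
    }
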
@@ -287,7 +310,8 @@
         }
 
         /* page directory entry */
-        pte_ptr = phys_ram_base + ((pde & ~0xfff) + ((addr >> 10) & 0xffc));
+        pte_ptr = phys_ram_base + 
+            (((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & a20_mask);
         pte = ldl_raw(pte_ptr);
         if (!(pte & PG_PRESENT_MASK)) {
             error_code = 0;
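
Note: likewise, bits 21..12 of the linear address index the page table, so
(addr >> 10) & 0xffc is the PTE byte offset. (The context comment still
reads "page directory entry", but this line loads the page table entry.)
An equivalent, more explicit form:

    /* PTE byte offset: table index (bits 21..12) times 4 */
    static inline uint32_t pte_offset(uint32_t addr)
    {
        return ((addr >> 12) & 0x3ff) << 2;   /* == (addr >> 10) & 0xffc */
    }
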
@@ -325,6 +349,7 @@
     }
     
  do_mapping:
+    pte = pte & a20_mask;
 #if !defined(CONFIG_SOFTMMU)
     if (is_softmmu) 
 #endif
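
Note: masking pte at the do_mapping label covers every path into it,
including the paging-disabled case where pte is the linear address itself,
so the frame address used for the final mapping honors the gate even when
no table walk occurred.
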
@@ -334,8 +359,8 @@
 
         /* software MMU case. Even if 4MB pages, we map only one 4KB
            page in the cache to avoid filling it too fast */
-        page_offset = (addr & ~0xfff) & (page_size - 1);
-        paddr = (pte & ~0xfff) + page_offset;
+        page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
+        paddr = (pte & TARGET_PAGE_MASK) + page_offset;
         vaddr = virt_addr + page_offset;
         index = (addr >> 12) & (CPU_TLB_SIZE - 1);
         pd = physpage_find(paddr);
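
Note: for a normal 4 KB page, page_size - 1 is 0xfff and page_offset is
zero, so paddr is simply the frame address. For a 4 MB page the same
expression selects the 4 KB-aligned offset of the faulting address within
the large page, so only that one slice is entered into the cache, and
index then maps each 4 KB virtual page to its TLB slot. A worked example,
assuming TARGET_PAGE_MASK is ~0xfff:

    /* addr = 0x00543000 inside a 4 MB page with frame base pte = 0x00400000 */
    page_offset = (0x00543000 & ~0xfff) & 0x3fffff;   /* = 0x143000 */
    paddr = (0x00400000 & ~0xfff) + 0x143000;         /* = 0x543000 */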
