root / target-i386 / helper2.c @ 7496f526

/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <signal.h>
#include <assert.h>

#include "cpu.h"
#include "exec-all.h"

//#define DEBUG_MMU

#ifdef USE_CODE_COPY
#include <asm/ldt.h>
#include <linux/unistd.h>
#include <linux/version.h>

_syscall3(int, modify_ldt, int, func, void *, ptr, unsigned long, bytecount)

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 66)
#define modify_ldt_ldt_s user_desc
#endif
#endif /* USE_CODE_COPY */

CPUX86State *cpu_x86_init(void)
{
    CPUX86State *env;
    static int inited;

    cpu_exec_init();

    env = malloc(sizeof(CPUX86State));
    if (!env)
        return NULL;
    memset(env, 0, sizeof(CPUX86State));
    /* init various static tables */
    if (!inited) {
        inited = 1;
        optimize_flags_init();
    }
#ifdef USE_CODE_COPY
    /* testing code for code copy case */
    {
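        /* This block installs LDT entry 1 as a writable data segment whose
           base is this CPUX86State (limit rounded up to 4K pages), then
           loads %fs with selector (1 << 3) | 7, i.e. index 1 with TI=1
           (LDT) and RPL=3, presumably so the code-copy generated code can
           reach the CPU state through the %fs segment. */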
        struct modify_ldt_ldt_s ldt;

        ldt.entry_number = 1;
        ldt.base_addr = (unsigned long)env;
        ldt.limit = (sizeof(CPUState) + 0xfff) >> 12;
        ldt.seg_32bit = 1;
        ldt.contents = MODIFY_LDT_CONTENTS_DATA;
        ldt.read_exec_only = 0;
        ldt.limit_in_pages = 1;
        ldt.seg_not_present = 0;
        ldt.useable = 1;
        modify_ldt(1, &ldt, sizeof(ldt)); /* write ldt entry */

        asm volatile ("movl %0, %%fs" : : "r" ((1 << 3) | 7));
    }
#endif
    cpu_single_env = env;
    cpu_reset(env);
    return env;
}

/* NOTE: must be called outside the CPU execute loop */
void cpu_reset(CPUX86State *env)
{
    int i;

    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = 0xffffffff;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK;
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK;
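    /* Real-mode reset state: the CS selector 0xf000 with cached base
       0xffff0000 and EIP 0xfff0 make the first instruction fetch hit the
       architectural reset vector at physical address 0xfffffff0; EDX holds
       the CPU signature (family 6, hence the "P6" comment below). */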
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, (uint8_t *)0xffff0000, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, NULL, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, NULL, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, NULL, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, NULL, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, NULL, 0xffff, 0);

    env->eip = 0xfff0;
    env->regs[R_EDX] = 0x600; /* indicate P6 processor */

    env->eflags = 0x2;

    /* FPU init */
    for(i = 0;i < 8; i++)
        env->fptags[i] = 1;
    env->fpuc = 0x37f;
}

void cpu_x86_close(CPUX86State *env)
{
    free(env);
}

/***********************************************************/
/* x86 debug */

static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",
    "MULB",
    "MULW",
    "MULL",
    "ADDB",
    "ADDW",
    "ADDL",
    "ADCB",
    "ADCW",
    "ADCL",
    "SUBB",
    "SUBW",
    "SUBL",
    "SBBB",
    "SBBW",
    "SBBL",
    "LOGICB",
    "LOGICW",
    "LOGICL",
    "INCB",
    "INCW",
    "INCL",
    "DECB",
    "DECW",
    "DECL",
    "SHLB",
    "SHLW",
    "SHLL",
    "SARB",
    "SARW",
    "SARL",
};

void cpu_x86_dump_state(CPUX86State *env, FILE *f, int flags)
{
    int eflags, i;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    eflags = env->eflags;
    fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
            "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
            "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c]    CPL=%d II=%d A20=%d\n",
            env->regs[R_EAX], env->regs[R_EBX], env->regs[R_ECX], env->regs[R_EDX],
            env->regs[R_ESI], env->regs[R_EDI], env->regs[R_EBP], env->regs[R_ESP],
            env->eip, eflags,
            eflags & DF_MASK ? 'D' : '-',
            eflags & CC_O ? 'O' : '-',
            eflags & CC_S ? 'S' : '-',
            eflags & CC_Z ? 'Z' : '-',
            eflags & CC_A ? 'A' : '-',
            eflags & CC_P ? 'P' : '-',
            eflags & CC_C ? 'C' : '-',
            env->hflags & HF_CPL_MASK,
            (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
            (env->a20_mask >> 20) & 1);
    for(i = 0; i < 6; i++) {
        SegmentCache *sc = &env->segs[i];
        fprintf(f, "%s =%04x %08x %08x %08x\n",
                seg_name[i],
                sc->selector,
                (int)sc->base,
                sc->limit,
                sc->flags);
    }
    fprintf(f, "LDT=%04x %08x %08x %08x\n",
            env->ldt.selector,
            (int)env->ldt.base,
            env->ldt.limit,
            env->ldt.flags);
    fprintf(f, "TR =%04x %08x %08x %08x\n",
            env->tr.selector,
            (int)env->tr.base,
            env->tr.limit,
            env->tr.flags);
    fprintf(f, "GDT=     %08x %08x\n",
            (int)env->gdt.base, env->gdt.limit);
    fprintf(f, "IDT=     %08x %08x\n",
            (int)env->idt.base, env->idt.limit);
    fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
            env->cr[0], env->cr[2], env->cr[3], env->cr[4]);

    if (flags & X86_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
        fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                env->cc_src, env->cc_dst, cc_op_name);
    }
    if (flags & X86_DUMP_FPU) {
        fprintf(f, "ST0=%f ST1=%f ST2=%f ST3=%f\n",
                (double)env->fpregs[0],
                (double)env->fpregs[1],
                (double)env->fpregs[2],
                (double)env->fpregs[3]);
        fprintf(f, "ST4=%f ST5=%f ST6=%f ST7=%f\n",
                (double)env->fpregs[4],
                (double)env->fpregs[5],
                (double)env->fpregs[6],
                (double)env->fpregs[7]);
    }
}

/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */

void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(env, 1);
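        /* Only bit 20 of a20_mask ever changes: with the A20 gate disabled
           the mask is 0xffefffff, so bit 20 is cleared from every physical
           address and the 8086-style 1 MB wrap-around is reproduced; with
           A20 enabled the mask is all ones. */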
        env->a20_mask = 0xffefffff | (a20_state << 20);
    }
}

void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
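    /* CR0.MP, CR0.EM and CR0.TS sit in bits 1-3 of CR0, so shifting
       new_cr0 left by HF_MP_SHIFT - 1 lines them up with the HF_MP,
       HF_EM and HF_TS hflag bits selected by the mask below. */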
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}

void cpu_x86_update_cr3(CPUX86State *env, uint32_t new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=%08x\n", new_cr3);
#endif
        tlb_flush(env, 0);
    }
}

void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", new_cr4);
#endif
    if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
        (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
        tlb_flush(env, 1);
    }
    env->cr[4] = new_cr4;
}

/* XXX: also flush 4MB pages */
void cpu_x86_flush_tlb(CPUX86State *env, uint32_t addr)
{
    tlb_flush_page(env, addr);
}

/* return value:
   -1 = cannot handle fault
   0  = nothing more to do
   1  = generate PF fault
   2  = soft MMU activation required for this block
*/
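/* Handle a memory access fault at 'addr': walk the two-level 32-bit page
   tables, checking user/supervisor and read/write permissions and updating
   the accessed/dirty bits, then install a 4KB mapping in the QEMU TLB with
   tlb_set_page().  Without paging (CR0.PG=0) the address is mapped
   one-to-one. */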
int cpu_x86_handle_mmu_fault(CPUX86State *env, uint32_t addr,
                             int is_write, int is_user, int is_softmmu)
{
    uint8_t *pde_ptr, *pte_ptr;
    uint32_t pde, pte, virt_addr, ptep;
    int error_code, is_dirty, prot, page_size, ret;
    unsigned long paddr, vaddr, page_offset;

#if defined(DEBUG_MMU)
    printf("MMU fault: addr=0x%08x w=%d u=%d eip=%08x\n",
           addr, is_write, is_user, env->eip);
#endif

    if (env->user_mode_only) {
        /* user mode only emulation */
        error_code = 0;
        goto do_fault;
    }

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE;
        page_size = 4096;
        goto do_mapping;
    }

    /* page directory entry */
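    /* bits 31-22 of the address index the page directory:
       (addr >> 20) & ~3 is the same as ((addr >> 22) << 2), i.e. the byte
       offset of the 4-byte PDE inside the page pointed to by CR3 */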
    pde_ptr = phys_ram_base +
        (((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3)) & env->a20_mask);
    pde = ldl_raw(pde_ptr);
    if (!(pde & PG_PRESENT_MASK)) {
        error_code = 0;
        goto do_fault;
    }
    /* if PSE bit is set, then we use a 4MB page */
    if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
        if (is_user) {
            if (!(pde & PG_USER_MASK))
                goto do_fault_protect;
            if (is_write && !(pde & PG_RW_MASK))
                goto do_fault_protect;
        } else {
            if ((env->cr[0] & CR0_WP_MASK) &&
                is_write && !(pde & PG_RW_MASK))
                goto do_fault_protect;
        }
        is_dirty = is_write && !(pde & PG_DIRTY_MASK);
        if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
            pde |= PG_ACCESSED_MASK;
            if (is_dirty)
                pde |= PG_DIRTY_MASK;
            stl_raw(pde_ptr, pde);
        }

        pte = pde & ~0x003ff000; /* align to 4MB */
        ptep = pte;
        page_size = 4096 * 1024;
        virt_addr = addr & ~0x003fffff;
    } else {
        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            stl_raw(pde_ptr, pde);
        }

        /* page table entry */
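        /* likewise, (addr >> 10) & 0xffc is ((addr >> 12) & 0x3ff) << 2,
           the byte offset of the 4-byte PTE selected by address
           bits 21-12 */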
        pte_ptr = phys_ram_base +
            (((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask);
        pte = ldl_raw(pte_ptr);
        if (!(pte & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* combine pde and pte user and rw protections */
        ptep = pte & pde;
        if (is_user) {
            if (!(ptep & PG_USER_MASK))
                goto do_fault_protect;
            if (is_write && !(ptep & PG_RW_MASK))
                goto do_fault_protect;
        } else {
            if ((env->cr[0] & CR0_WP_MASK) &&
                is_write && !(ptep & PG_RW_MASK))
                goto do_fault_protect;
        }
        is_dirty = is_write && !(pte & PG_DIRTY_MASK);
        if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
            pte |= PG_ACCESSED_MASK;
            if (is_dirty)
                pte |= PG_DIRTY_MASK;
            stl_raw(pte_ptr, pte);
        }
        page_size = 4096;
        virt_addr = addr & ~0xfff;
    }

    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
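        /* a write to a still-clean page therefore faults back into this
           handler, which sets the dirty bit above before granting
           PAGE_WRITE */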
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }

 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    ret = tlb_set_page(env, vaddr, paddr, prot, is_user, is_softmmu);
    return ret;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT) | error_code;
    if (is_user)
        env->error_code |= PG_ERROR_U_MASK;
    return 1;
}

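/* Translate a virtual address to a physical address for debug accesses:
   the same page table walk as above, but read-only - no fault is raised
   and no accessed/dirty bits are touched.  Returns -1 if the page is not
   present. */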
#if defined(CONFIG_USER_ONLY)
target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}
#else
target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    uint8_t *pde_ptr, *pte_ptr;
    uint32_t pde, pte, paddr, page_offset, page_size;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
        page_size = 4096;
    } else {
        /* page directory entry */
        pde_ptr = phys_ram_base +
            (((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3)) & env->a20_mask);
        pde = ldl_raw(pde_ptr);
        if (!(pde & PG_PRESENT_MASK))
            return -1;
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            pte = pde & ~0x003ff000; /* align to 4MB */
            page_size = 4096 * 1024;
        } else {
            /* page table entry */
            pte_ptr = phys_ram_base +
                (((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask);
            pte = ldl_raw(pte_ptr);
            if (!(pte & PG_PRESENT_MASK))
                return -1;
            page_size = 4096;
        }
    }
    pte = pte & env->a20_mask;
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
#endif

#if defined(USE_CODE_COPY)
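/* Layout of the 32-bit protected-mode FSAVE/FRSTOR memory image
   (108 bytes): control, status and tag words each padded to 32 bits, the
   FPU instruction/operand pointers, then the eight 80-bit data registers.
   This is believed to match what the host fsave/frstor instructions used
   below expect. */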
struct fpstate {
    uint16_t fpuc;
    uint16_t dummy1;
    uint16_t fpus;
    uint16_t dummy2;
    uint16_t fptag;
    uint16_t dummy3;

    uint32_t fpip;
    uint32_t fpcs;
    uint32_t fpoo;
    uint32_t fpos;
    uint8_t fpregs1[8 * 10];
};

void restore_native_fp_state(CPUState *env)
{
    int fptag, i, j;
    struct fpstate fp1, *fp = &fp1;

    fp->fpuc = env->fpuc;
    fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
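    /* Build the x87 tag word: two bits per physical register, 11b meaning
       empty.  QEMU only tracks empty vs. valid in fptags[], so valid
       registers are simply left as 00b here. */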
    for (i=7; i>=0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            /* the FPU automatically computes it */
        }
    }
    fp->fptag = fptag;
    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&fp->fpregs1[i * 10], &env->fpregs[j], 10);
        j = (j + 1) & 7;
    }
    asm volatile ("frstor %0" : "=m" (*fp));
    env->native_fp_regs = 1;
}

void save_native_fp_state(CPUState *env)
{
    int fptag, i, j;
    uint16_t fpuc;
    struct fpstate fp1, *fp = &fp1;

    asm volatile ("fsave %0" : : "m" (*fp));
    env->fpuc = fp->fpuc;
    env->fpstt = (fp->fpus >> 11) & 7;
    env->fpus = fp->fpus & ~0x3800;
    fptag = fp->fptag;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&env->fpregs[j], &fp->fpregs1[i * 10], 10);
        j = (j + 1) & 7;
    }
    /* we must restore the default rounding state */
    /* XXX: we do not restore the exception state */
    fpuc = 0x037f | (env->fpuc & (3 << 10));
    asm volatile("fldcw %0" : : "m" (fpuc));
    env->native_fp_regs = 0;
}
#endif