Statistics
| Branch: | Revision:

root / target-sparc / helper.c @ 0fa85d43

History | View | Annotate | Download (12.2 kB)

1
/*
2
 *  sparc helpers
3
 * 
4
 *  Copyright (c) 2003 Fabrice Bellard
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, write to the Free Software
18
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
19
 */
20
#include "exec.h"
21

    
22
//#define DEBUG_PCALL
23
//#define DEBUG_MMU
24

    
25
/* Sparc MMU emulation */
26
int cpu_sparc_handle_mmu_fault (CPUState *env, uint32_t address, int rw,
27
                              int is_user, int is_softmmu);
28

    
29
/* thread support */
30

    
31
spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
32

    
33
/* Acquire the single global CPU lock protecting shared emulator state. */
void cpu_lock(void)
{
    spin_lock(&global_cpu_lock);
}
37

    
38
/* Release the global CPU lock taken by cpu_lock(). */
void cpu_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}
42

    
43
#if !defined(CONFIG_USER_ONLY) 
44

    
45
#define MMUSUFFIX _mmu
46
#define GETPC() (__builtin_return_address(0))
47

    
48
#define SHIFT 0
49
#include "softmmu_template.h"
50

    
51
#define SHIFT 1
52
#include "softmmu_template.h"
53

    
54
#define SHIFT 2
55
#include "softmmu_template.h"
56

    
57
#define SHIFT 3
58
#include "softmmu_template.h"
59

    
60

    
61
/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(target_ulong addr, int is_write, int is_user, void *retaddr)
{
    TranslationBlock *tb;
    int ret;
    unsigned long pc;
    CPUState *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    /* Attempt the translation; on success this also installs the
       mapping in the softmmu TLB (is_softmmu == 1). */
    ret = cpu_sparc_handle_mmu_fault(env, addr, is_write, is_user, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        /* Longjmps out to the CPU loop; env is NOT restored on this
           path — the comment above about restoring registers applies. */
        raise_exception_err(ret, env->error_code);
    }
    env = saved_env;
}
93
#endif
94

    
95
/* Access-permission check table, indexed as
   access_table[access_index][access_perms] (see get_physical_address:
   access_index encodes write/exec/supervisor bits, access_perms is the
   PTE ACC field).  0 means the access is allowed; a non-zero value is
   the error code reported in the MMU fault status register. */
static const int access_table[8][8] = {
    { 0, 0, 0, 0, 2, 0, 3, 3 },
    { 0, 0, 0, 0, 2, 0, 0, 0 },
    { 2, 2, 0, 0, 0, 2, 3, 3 },
    { 2, 2, 0, 0, 0, 2, 0, 0 },
    { 2, 0, 2, 0, 2, 2, 3, 3 },
    { 2, 0, 2, 0, 2, 0, 2, 0 },
    { 2, 2, 2, 0, 2, 2, 3, 3 },
    { 2, 2, 2, 0, 2, 2, 2, 0 }
};
105

    
106
/* Write-permission table, indexed as rw_table[is_user][access_perms]
   where access_perms is the PTE ACC field.  1 = write OK. */
static const int rw_table[2][8] = {
    { 0, 1, 0, 1, 0, 1, 0, 1 },
    { 0, 1, 0, 1, 0, 0, 0, 0 }
};
111

    
112
/* Translate a virtual address through the SPARC reference MMU (SRMMU).
 *
 * On success returns 0 and fills in:
 *   *physical     - physical address of the (4KB) page
 *   *prot         - PAGE_READ, plus PAGE_WRITE once the page is dirty
 *   *access_index - encoded access type used against access_table
 * On failure returns a non-zero error code:
 *   1 = invalid entry, 4 = reserved/translation error, or a value from
 *   access_table for a protection violation.
 * rw: bit 0 = write access, bit 1 = code fetch (see *access_index below).
 */
int get_physical_address (CPUState *env, uint32_t *physical, int *prot,
                          int *access_index, uint32_t address, int rw,
                          int is_user)
{
    int access_perms = 0;
    target_phys_addr_t pde_ptr;
    uint32_t pde, virt_addr;
    int error_code = 0, is_dirty;
    unsigned long page_offset;

    virt_addr = address & TARGET_PAGE_MASK;
    if ((env->mmuregs[0] & MMU_E) == 0) { /* MMU disabled */
        *physical = address;
        *prot = PAGE_READ | PAGE_WRITE;
        return 0;
    }

    /* SPARC reference MMU table walk: Context table->L1->L2->PTE */
    /* Context base + context number */
    /* mmuregs[1] = context table pointer, mmuregs[2] = context number;
       both are stored shifted right by 4 bits, hence the << 4. */
    pde_ptr = (env->mmuregs[1] << 4) + (env->mmuregs[2] << 4);
    cpu_physical_memory_read(pde_ptr, (uint8_t *)&pde, 4);
    bswap32s(&pde);

    /* Ctx pde */
    switch (pde & PTE_ENTRYTYPE_MASK) {
    default:
    case 0: /* Invalid */
        return 1;
    case 2: /* L0 PTE, maybe should not happen? */
    case 3: /* Reserved */
        return 4;
    case 1: /* L0 PDE */
        /* Index with VA[31:24] (as a word offset); the PDE holds the
           next-level table base shifted right by 4. */
        pde_ptr = ((address >> 22) & ~3) + ((pde & ~3) << 4);
        cpu_physical_memory_read(pde_ptr, (uint8_t *)&pde, 4);
        bswap32s(&pde);

        switch (pde & PTE_ENTRYTYPE_MASK) {
        default:
        case 0: /* Invalid */
            return 1;
        case 3: /* Reserved */
            return 4;
        case 1: /* L1 PDE */
            /* Index with VA[23:18]. */
            pde_ptr = ((address & 0xfc0000) >> 16) + ((pde & ~3) << 4);
            cpu_physical_memory_read(pde_ptr, (uint8_t *)&pde, 4);
            bswap32s(&pde);

            switch (pde & PTE_ENTRYTYPE_MASK) {
            default:
            case 0: /* Invalid */
                return 1;
            case 3: /* Reserved */
                return 4;
            case 1: /* L2 PDE */
                /* Index with VA[17:12]. */
                pde_ptr = ((address & 0x3f000) >> 10) + ((pde & ~3) << 4);
                cpu_physical_memory_read(pde_ptr, (uint8_t *)&pde, 4);
                bswap32s(&pde);

                switch (pde & PTE_ENTRYTYPE_MASK) {
                default:
                case 0: /* Invalid */
                    return 1;
                case 1: /* PDE, should not happen */
                case 3: /* Reserved */
                    return 4;
                case 2: /* L3 PTE */
                    virt_addr = address & TARGET_PAGE_MASK;
                    /* NOTE(review): this expression is always 0 (the two
                       masks are disjoint) — presumably intentional since
                       *physical should be the page base for a 4KB page,
                       but confirm. */
                    page_offset = (address & TARGET_PAGE_MASK) & (TARGET_PAGE_SIZE - 1);
                }
                break;
            case 2: /* L2 PTE */
                /* 256KB page: keep the sub-page bits so the 4KB mapping
                   below points inside the large page. */
                virt_addr = address & ~0x3ffff;
                page_offset = address & 0x3ffff;
            }
            break;
        case 2: /* L1 PTE */
            /* 16MB page. */
            virt_addr = address & ~0xffffff;
            page_offset = address & 0xffffff;
        }
    }

    /* update page modified and dirty bits */
    is_dirty = (rw & 1) && !(pde & PG_MODIFIED_MASK);
    if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
        uint32_t tmppde;
        pde |= PG_ACCESSED_MASK;
        if (is_dirty)
            pde |= PG_MODIFIED_MASK;
        /* Write the updated PTE back to memory in big-endian order. */
        tmppde = bswap32(pde);
        cpu_physical_memory_write(pde_ptr, (uint8_t *)&tmppde, 4);
    }
    /* check access */
    /* access_index: bit2 = write, bit1 = code fetch, bit0 = supervisor. */
    *access_index = ((rw & 1) << 2) | (rw & 2) | (is_user? 0 : 1);
    access_perms = (pde & PTE_ACCESS_MASK) >> PTE_ACCESS_SHIFT;
    error_code = access_table[*access_index][access_perms];
    if (error_code)
        return error_code;

    /* the page can be put in the TLB */
    *prot = PAGE_READ;
    if (pde & PG_MODIFIED_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (rw_table[is_user][access_perms])
                *prot |= PAGE_WRITE;
    }

    /* Even if large ptes, we map only one 4KB page in the cache to
       avoid filling it too fast */
    *physical = ((pde & PTE_ADDR_MASK) << 4) + page_offset;
    return 0;
}
224

    
225
/* Perform address translation */
/* Returns 0 on success (mapping installed, or fault suppressed by
   no-fault mode); otherwise records the fault in the MMU fault status
   and fault address registers and sets env->exception_index. */
int cpu_sparc_handle_mmu_fault (CPUState *env, uint32_t address, int rw,
                              int is_user, int is_softmmu)
{
    /* NOTE(review): exception is never assigned, so exception_index is
       always set to 0 below — the caller presumably maps this to the
       actual trap type; confirm. */
    int exception = 0;
    uint32_t virt_addr, paddr;
    unsigned long vaddr;
    int error_code = 0, prot, ret = 0, access_index;

    if (env->user_mode_only) {
        /* user mode only emulation */
        error_code = -2;
        goto do_fault_user;
    }

    error_code = get_physical_address(env, &paddr, &prot, &access_index, address, rw, is_user);
    if (error_code == 0) {
        virt_addr = address & TARGET_PAGE_MASK;
        /* NOTE(review): the added term is always 0 (disjoint masks), so
           vaddr == virt_addr — page-aligned, as tlb_set_page expects. */
        vaddr = virt_addr + ((address & TARGET_PAGE_MASK) & (TARGET_PAGE_SIZE - 1));
        ret = tlb_set_page(env, vaddr, paddr, prot, is_user, is_softmmu);
        return ret;
    }

    /* A still-pending (unread) fault status means this one overflowed. */
    if (env->mmuregs[3]) /* Fault status register */
        env->mmuregs[3] = 1; /* overflow (not read before another fault) */
    env->mmuregs[3] |= (access_index << 5) | (error_code << 2) | 2;
    env->mmuregs[4] = address; /* Fault address register */

    /* No-fault mode or traps disabled: record the fault but don't raise it. */
    if (env->mmuregs[0] & MMU_NF || env->psret == 0) // No fault
        return 0;
 do_fault_user:
    env->exception_index = exception;
    env->error_code = error_code;
    return error_code;
}
260

    
261
/* Copy a block of 8 32-bit words (one half of a register window). */
void memcpy32(uint32_t *dst, const uint32_t *src)
{
    int i;

    for (i = 0; i < 8; i++)
        dst[i] = src[i];
}
272

    
273
/* Switch the current register window pointer (CWP) of the global env.
 * Window NWINDOWS-1 overlaps window 0 in the circular window file, so
 * its overlapping half is copied to/from a spare slot at the end of
 * regbase when entering or leaving that window. */
void set_cwp(int new_cwp)
{
    /* put the modified wrap registers at their proper location */
    if (env->cwp == (NWINDOWS - 1))
        memcpy32(env->regbase, env->regbase + NWINDOWS * 16);
    env->cwp = new_cwp;
    /* put the wrap registers at their temporary location */
    if (new_cwp == (NWINDOWS - 1))
        memcpy32(env->regbase + NWINDOWS * 16, env->regbase);
    /* Point the window register pointer at the new window (16 regs each). */
    env->regwptr = env->regbase + (new_cwp * 16);
}
284

    
285
/* Externally-callable variant of set_cwp: operates on env1 by
 * temporarily swapping it into the global env used by set_cwp. */
void cpu_set_cwp(CPUState *env1, int new_cwp)
{
    CPUState *saved_env;
    saved_env = env;
    env = env1;
    set_cwp(new_cwp);
    env = saved_env;
}
293

    
294
/*
 * Enter a trap/interrupt handler for trap type intno.
 * NOTE(review): the is_int/next_eip/is_hw parameters and the EIP
 * wording come from the x86 helper this was modelled on; here only
 * intno (and error_code, for logging) appear to be used — confirm
 * against the callers.
 */
void do_interrupt(int intno, int is_int, int error_code, 
                  unsigned int next_eip, int is_hw)
{
    int cwp;

#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_INT) {
        static int count;
        fprintf(logfile, "%6d: v=%02x e=%04x i=%d pc=%08x npc=%08x SP=%08x\n",
                    count, intno, error_code, is_int,
                    env->pc,
                    env->npc, env->regwptr[6]);
#if 1
        cpu_dump_state(env, logfile, fprintf, 0);
        {
            int i;
            uint8_t *ptr;

            fprintf(logfile, "       code=");
            ptr = (uint8_t *)env->pc;
            for(i = 0; i < 16; i++) {
                fprintf(logfile, " %02x", ldub(ptr + i));
            }
            fprintf(logfile, "\n");
        }
#endif
        count++;
    }
#endif
#if !defined(CONFIG_USER_ONLY) 
    /* A trap with traps already disabled puts the CPU in error state. */
    if (env->psret == 0) {
        cpu_abort(cpu_single_env, "Trap while interrupts disabled, Error state");
        return;
    }
#endif
    /* Disable traps and move to the previous register window. */
    env->psret = 0;
    cwp = (env->cwp - 1) & (NWINDOWS - 1); 
    set_cwp(cwp);
    /* Save the faulting PC/nPC in local registers l1/l2 of the new window. */
    env->regwptr[9] = env->pc - 4; // XXX?
    env->regwptr[10] = env->pc;
    /* Enter supervisor mode, remembering the previous S bit. */
    env->psrps = env->psrs;
    env->psrs = 1;
    /* Vector through the trap base register: tt field is bits [11:4]. */
    env->tbr = (env->tbr & TBR_BASE_MASK) | (intno << 4);
    env->pc = env->tbr;
    env->npc = env->pc + 4;
    env->exception_index = 0;
}
346

    
347
/* Raise exception_index via raise_exception (does not return).
 * NOTE(review): error_code is currently discarded here; callers are
 * expected to have stored it in env->error_code themselves. */
void raise_exception_err(int exception_index, int error_code)
{
    raise_exception(exception_index);
}
351

    
352
/* Probe the SRMMU page tables for address, as the probe ASI does.
 * mmulev selects how deep to walk: 3 stops at the root (L0) entry,
 * 2 at L1, 1 at L2, 0 walks down to the L3 PTE.  Returns the PDE/PTE
 * found at that level, or 0 if the walk hits an invalid or reserved
 * entry. */
uint32_t mmu_probe(uint32_t address, int mmulev)
{
    target_phys_addr_t pde_ptr;
    uint32_t pde;

    /* Context base + context number */
    pde_ptr = (env->mmuregs[1] << 4) + (env->mmuregs[2] << 4);
    cpu_physical_memory_read(pde_ptr, (uint8_t *)&pde, 4);
    bswap32s(&pde);
    switch (pde & PTE_ENTRYTYPE_MASK) {
    default:
    case 0: /* Invalid */
    case 2: /* PTE, maybe should not happen? */
    case 3: /* Reserved */
        return 0;
    case 1: /* L1 PDE */
        if (mmulev == 3)
            return pde;
        /* Index with VA[31:24] (same computation as get_physical_address). */
        pde_ptr = ((address >> 22) & ~3) + ((pde & ~3) << 4);
        cpu_physical_memory_read(pde_ptr, (uint8_t *)&pde, 4);
        bswap32s(&pde);

        switch (pde & PTE_ENTRYTYPE_MASK) {
        default:
        case 0: /* Invalid */
        case 3: /* Reserved */
            return 0;
        case 2: /* L1 PTE */
            return pde;
        case 1: /* L2 PDE */
            if (mmulev == 2)
                return pde;
            /* Index with VA[23:18]. */
            pde_ptr = ((address & 0xfc0000) >> 16) + ((pde & ~3) << 4);
            cpu_physical_memory_read(pde_ptr, (uint8_t *)&pde, 4);
            bswap32s(&pde);

            switch (pde & PTE_ENTRYTYPE_MASK) {
            default:
            case 0: /* Invalid */
            case 3: /* Reserved */
                return 0;
            case 2: /* L2 PTE */
                return pde;
            case 1: /* L3 PDE */
                if (mmulev == 1)
                    return pde;
                /* Index with VA[17:12]. */
                pde_ptr = ((address & 0x3f000) >> 10) + ((pde & ~3) << 4);
                cpu_physical_memory_read(pde_ptr, (uint8_t *)&pde, 4);
                bswap32s(&pde);

                switch (pde & PTE_ENTRYTYPE_MASK) {
                default:
                case 0: /* Invalid */
                case 1: /* PDE, should not happen */
                case 3: /* Reserved */
                    return 0;
                case 2: /* L3 PTE */
                    return pde;
                }
            }
        }
    }
    return 0;
}
416

    
417
/* Dump every valid mapping of the current context to stdout by probing
 * all 256 16MB regions, their 64 256KB sub-regions and 64 4KB pages.
 * Compiles to a no-op unless DEBUG_MMU is defined. */
void dump_mmu(void)
{
#ifdef DEBUG_MMU
    uint32_t pa, va, va1, va2;
    int n, m, o;
    target_phys_addr_t pde_ptr;
    uint32_t pde;

    printf("MMU dump:\n");
    /* Context base + context number (same computation as the table walk). */
    pde_ptr = (env->mmuregs[1] << 4) + (env->mmuregs[2] << 4);
    cpu_physical_memory_read(pde_ptr, (uint8_t *)&pde, 4);
    bswap32s(&pde);
    printf("Root ptr: 0x%08x, ctx: %d\n", env->mmuregs[1] << 4, env->mmuregs[2]);
    for (n = 0, va = 0; n < 256; n++, va += 16 * 1024 * 1024) {
        /* mmulev 2: stop at the L1 (16MB region) entry. */
        pde_ptr = mmu_probe(va, 2);
        if (pde_ptr) {
            pa = cpu_get_phys_page_debug(env, va);
            printf("VA: 0x%08x, PA: 0x%08x PDE: 0x%08x\n", va, pa, pde_ptr);
            for (m = 0, va1 = va; m < 64; m++, va1 += 256 * 1024) {
                /* mmulev 1: stop at the L2 (256KB region) entry. */
                pde_ptr = mmu_probe(va1, 1);
                if (pde_ptr) {
                    pa = cpu_get_phys_page_debug(env, va1);
                    printf(" VA: 0x%08x, PA: 0x%08x PDE: 0x%08x\n", va1, pa, pde_ptr);
                    for (o = 0, va2 = va1; o < 64; o++, va2 += 4 * 1024) {
                        /* mmulev 0: walk down to the 4KB page PTE. */
                        pde_ptr = mmu_probe(va2, 0);
                        if (pde_ptr) {
                            pa = cpu_get_phys_page_debug(env, va2);
                            printf("  VA: 0x%08x, PA: 0x%08x PTE: 0x%08x\n", va2, pa, pde_ptr);
                        }
                    }
                }
            }
        }
    }
    printf("MMU dump ends\n");
#endif
}
}