root / target-ppc / mmu-hash64.c @ bb218042
History | View | Annotate | Download (17.6 kB)
1 |
/*
|
---|---|
2 |
* PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU.
|
3 |
*
|
4 |
* Copyright (c) 2003-2007 Jocelyn Mayer
|
5 |
* Copyright (c) 2013 David Gibson, IBM Corporation
|
6 |
*
|
7 |
* This library is free software; you can redistribute it and/or
|
8 |
* modify it under the terms of the GNU Lesser General Public
|
9 |
* License as published by the Free Software Foundation; either
|
10 |
* version 2 of the License, or (at your option) any later version.
|
11 |
*
|
12 |
* This library is distributed in the hope that it will be useful,
|
13 |
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
14 |
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
15 |
* Lesser General Public License for more details.
|
16 |
*
|
17 |
* You should have received a copy of the GNU Lesser General Public
|
18 |
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
|
19 |
*/
|
20 |
#include "cpu.h" |
21 |
#include "helper.h" |
22 |
#include "sysemu/kvm.h" |
23 |
#include "kvm_ppc.h" |
24 |
#include "mmu-hash64.h" |
25 |
|
26 |
//#define DEBUG_MMU
|
27 |
//#define DEBUG_SLB
|
28 |
|
29 |
#ifdef DEBUG_MMU
|
30 |
# define LOG_MMU(...) qemu_log(__VA_ARGS__)
|
31 |
# define LOG_MMU_STATE(env) log_cpu_state((env), 0) |
32 |
#else
|
33 |
# define LOG_MMU(...) do { } while (0) |
34 |
# define LOG_MMU_STATE(...) do { } while (0) |
35 |
#endif
|
36 |
|
37 |
#ifdef DEBUG_SLB
|
38 |
# define LOG_SLB(...) qemu_log(__VA_ARGS__)
|
39 |
#else
|
40 |
# define LOG_SLB(...) do { } while (0) |
41 |
#endif
|
42 |
|
43 |
struct mmu_ctx_hash64 {
|
44 |
hwaddr raddr; /* Real address */
|
45 |
int prot; /* Protection bits */ |
46 |
hwaddr hash[2]; /* Pagetable hash values */ |
47 |
target_ulong ptem; /* Virtual segment ID | API */
|
48 |
int key; /* Access key */ |
49 |
}; |
50 |
|
51 |
/*
|
52 |
* SLB handling
|
53 |
*/
|
54 |
|
55 |
/*
 * Find the SLB entry, if any, that translates effective address @eaddr.
 *
 * Every slot is tested against both the 256MB and the 1TB segment match
 * for @eaddr; on MMUs without 1T segment support no 1T entries can have
 * been inserted (enforced in the slbmte path), so the extra comparison
 * is harmless there.  Returns NULL when no valid entry matches.
 */
static ppc_slb_t *slb_lookup(CPUPPCState *env, target_ulong eaddr)
{
    uint64_t match_256M = (eaddr & SEGMENT_MASK_256M) | SLB_ESID_V;
    uint64_t match_1T = (eaddr & SEGMENT_MASK_1T) | SLB_ESID_V;
    int i;

    LOG_SLB("%s: eaddr " TARGET_FMT_lx "\n", __func__, eaddr);

    for (i = 0; i < env->slb_nr; i++) {
        ppc_slb_t *entry = &env->slb[i];
        uint64_t seg_size = entry->vsid & SLB_VSID_B;

        LOG_SLB("%s: slot %d %016" PRIx64 " %016"
                PRIx64 "\n", __func__, i, entry->esid, entry->vsid);

        if (seg_size == SLB_VSID_B_256M && entry->esid == match_256M) {
            return entry;
        }
        if (seg_size == SLB_VSID_B_1T && entry->esid == match_1T) {
            return entry;
        }
    }

    return NULL;
}
83 |
|
84 |
/*
 * Print every non-empty SLB entry of @env to @f, one slot per line.
 * Slots whose ESID and VSID are both zero are treated as unused.
 */
void dump_slb(FILE *f, fprintf_function cpu_fprintf, CPUPPCState *env)
{
    int n;

    /* Pull current register state from KVM before reading the SLB */
    cpu_synchronize_state(env);

    cpu_fprintf(f, "SLB\tESID\t\t\tVSID\n");
    for (n = 0; n < env->slb_nr; n++) {
        uint64_t esid = env->slb[n].esid;
        uint64_t vsid = env->slb[n].vsid;

        if (esid != 0 || vsid != 0) {
            cpu_fprintf(f, "%d\t0x%016" PRIx64 "\t0x%016" PRIx64 "\n",
                        n, esid, vsid);
        }
    }
}
102 |
|
103 |
/*
 * slbia instruction: invalidate all SLB entries except slot 0.
 * If anything was actually invalidated, flush the whole TLB.
 */
void helper_slbia(CPUPPCState *env)
{
    int invalidated = 0;
    int n;

    /* XXX: Warning: slbia never invalidates the first segment */
    for (n = 1; n < env->slb_nr; n++) {
        ppc_slb_t *entry = &env->slb[n];

        if (!(entry->esid & SLB_ESID_V)) {
            continue;
        }
        entry->esid &= ~SLB_ESID_V;
        /* XXX: given the fact that segment size is 256 MB or 1TB,
         * and we still don't have a tlb_flush_mask(env, n, mask)
         * in QEMU, we just invalidate all TLBs
         */
        invalidated = 1;
    }

    if (invalidated) {
        tlb_flush(env, 1);
    }
}
125 |
|
126 |
/*
 * slbie instruction: invalidate the SLB entry translating @addr,
 * if one exists and is valid, then flush the whole TLB.
 */
void helper_slbie(CPUPPCState *env, target_ulong addr)
{
    ppc_slb_t *entry = slb_lookup(env, addr);

    if (entry && (entry->esid & SLB_ESID_V)) {
        entry->esid &= ~SLB_ESID_V;

        /* XXX: given the fact that segment size is 256 MB or 1TB,
         * and we still don't have a tlb_flush_mask(env, n, mask)
         * in QEMU, we just invalidate all TLBs
         */
        tlb_flush(env, 1);
    }
}
145 |
|
146 |
/*
 * slbmte: install an SLB entry.
 *
 * @rb carries the ESID, valid bit, and (in its low 12 bits) the slot
 * index; @rs carries the VSID and segment flags.  Returns 0 on success,
 * -1 on any invalid operand (reserved bits set, slot out of range, bad
 * segment size, or a 1T segment on an MMU without 1T support).
 *
 * Fix: the original formed &env->slb[slot] before validating the slot;
 * slot can be up to 0xfff while slb[] is a small fixed-size array, and
 * computing a pointer beyond one-past-the-end is undefined behavior.
 * The pointer is now taken only after the checks pass.
 */
int ppc_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs)
{
    int slot = rb & 0xfff;
    ppc_slb_t *slb;

    /* slb_nr is a power of two, so this rejects both reserved bits
     * and slot indices >= slb_nr in one test */
    if (rb & (0x1000 - env->slb_nr)) {
        return -1; /* Reserved bits set or slot too high */
    }
    if (rs & (SLB_VSID_B & ~SLB_VSID_B_1T)) {
        return -1; /* Bad segment size */
    }
    if ((rs & SLB_VSID_B) && !(env->mmu_model & POWERPC_MMU_1TSEG)) {
        return -1; /* 1T segment on MMU that doesn't support it */
    }

    /* Slot is now known to be in range; safe to index the array */
    slb = &env->slb[slot];

    /* Mask out the slot number as we store the entry */
    slb->esid = rb & (SLB_ESID_ESID | SLB_ESID_V);
    slb->vsid = rs;

    LOG_SLB("%s: %d " TARGET_FMT_lx " - " TARGET_FMT_lx " => %016" PRIx64
            " %016" PRIx64 "\n", __func__, slot, rb, rs,
            slb->esid, slb->vsid);

    return 0;
}
171 |
|
172 |
/*
 * slbmfee backend: read the ESID word of the SLB entry selected by the
 * low 12 bits of @rb into *@rt.  Returns 0 on success, -1 if the slot
 * index is out of range (*@rt is then left untouched).
 *
 * Fix: validate the slot before forming a pointer into env->slb[] —
 * the index can be up to 0xfff, well past the array bounds, and
 * forming such a pointer is undefined behavior.
 */
static int ppc_load_slb_esid(CPUPPCState *env, target_ulong rb,
                             target_ulong *rt)
{
    int slot = rb & 0xfff;

    if (slot >= env->slb_nr) {
        return -1;
    }

    *rt = env->slb[slot].esid;
    return 0;
}
185 |
|
186 |
/*
 * slbmfev backend: read the VSID word of the SLB entry selected by the
 * low 12 bits of @rb into *@rt.  Returns 0 on success, -1 if the slot
 * index is out of range (*@rt is then left untouched).
 *
 * Fix: validate the slot before forming a pointer into env->slb[] —
 * the index can be up to 0xfff, well past the array bounds, and
 * forming such a pointer is undefined behavior.
 */
static int ppc_load_slb_vsid(CPUPPCState *env, target_ulong rb,
                             target_ulong *rt)
{
    int slot = rb & 0xfff;

    if (slot >= env->slb_nr) {
        return -1;
    }

    *rt = env->slb[slot].vsid;
    return 0;
}
199 |
|
200 |
/*
 * TCG helper for slbmte: raise an invalid-operation program interrupt
 * when the operands fail validation in ppc_store_slb().
 */
void helper_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs)
{
    int rc = ppc_store_slb(env, rb, rs);

    if (rc < 0) {
        helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL);
    }
}
207 |
|
208 |
/*
 * TCG helper for slbmfee: return the selected entry's ESID, raising an
 * invalid-operation program interrupt on a bad slot index (the result
 * register then keeps the value 0).
 */
target_ulong helper_load_slb_esid(CPUPPCState *env, target_ulong rb)
{
    target_ulong esid = 0;
    int rc = ppc_load_slb_esid(env, rb, &esid);

    if (rc < 0) {
        helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL);
    }
    return esid;
}
218 |
|
219 |
/*
 * TCG helper for slbmfev: return the selected entry's VSID, raising an
 * invalid-operation program interrupt on a bad slot index (the result
 * register then keeps the value 0).
 */
target_ulong helper_load_slb_vsid(CPUPPCState *env, target_ulong rb)
{
    target_ulong vsid = 0;
    int rc = ppc_load_slb_vsid(env, rb, &vsid);

    if (rc < 0) {
        helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL);
    }
    return vsid;
}
229 |
|
230 |
/*
|
231 |
* 64-bit hash table MMU handling
|
232 |
*/
|
233 |
|
234 |
#define PTE64_CHECK_MASK (TARGET_PAGE_MASK | 0x7F) |
235 |
|
236 |
/*
 * Convert an access key / PP-bits pair into PAGE_* access rights.
 *
 * @key: 0 or 1, the access key derived from the SLB KP/KS bits
 * @pp:  page-protection field with PP0 folded into bit 2
 * @nx:  true when execution is forbidden (PTE N or G bit set)
 *
 * PP values 4, 5 and 7 are architecturally undefined; they map to
 * no access here.
 */
static int ppc_hash64_pp_check(int key, int pp, bool nx)
{
    int access = 0;

    if (key == 0) {
        switch (pp) {
        case 0x0:
        case 0x1:
        case 0x2:
            access = PAGE_READ | PAGE_WRITE;
            break;
        case 0x3:
        case 0x6:
            access = PAGE_READ;
            break;
        }
    } else {
        switch (pp) {
        case 0x1:
        case 0x3:
            access = PAGE_READ;
            break;
        case 0x2:
            access = PAGE_READ | PAGE_WRITE;
            break;
        case 0x0:
        case 0x6:
        default:
            access = 0;
            break;
        }
    }

    /* Execute permission is granted unless the no-execute flag is set */
    return nx ? access : (access | PAGE_EXEC);
}
276 |
|
277 |
static int ppc_hash64_check_prot(int prot, int rwx) |
278 |
{ |
279 |
int ret;
|
280 |
|
281 |
if (rwx == 2) { |
282 |
if (prot & PAGE_EXEC) {
|
283 |
ret = 0;
|
284 |
} else {
|
285 |
ret = -2;
|
286 |
} |
287 |
} else if (rwx == 1) { |
288 |
if (prot & PAGE_WRITE) {
|
289 |
ret = 0;
|
290 |
} else {
|
291 |
ret = -2;
|
292 |
} |
293 |
} else {
|
294 |
if (prot & PAGE_READ) {
|
295 |
ret = 0;
|
296 |
} else {
|
297 |
ret = -2;
|
298 |
} |
299 |
} |
300 |
|
301 |
return ret;
|
302 |
} |
303 |
|
304 |
/*
 * Check one hash PTE (@pte0/@pte1) against the translation in progress.
 *
 * @h is 0 for a primary-hash lookup and 1 for secondary; the PTE only
 * qualifies when its SECONDARY bit matches.  On a ptem match the PTE's
 * word 1 is stashed in ctx->raddr and the computed rights in ctx->prot.
 *
 * Returns:
 *    0  match found and access permitted
 *   -1  PTE invalid or does not match
 *   -2  match found but access violates the page protection
 *   -3  duplicate match disagrees with an earlier one (inconsistent
 *       RPN/WIMG/PP) — page tables are corrupt
 */
static int pte64_check(struct mmu_ctx_hash64 *ctx, target_ulong pte0,
                       target_ulong pte1, int h, int rwx)
{
    target_ulong mmask;
    int access, ret, pp;

    ret = -1;
    /* Check validity and table match */
    if ((pte0 & HPTE64_V_VALID) && (h == !!(pte0 & HPTE64_V_SECONDARY))) {
        bool nx;

        /* Check vsid & api */
        mmask = PTE64_CHECK_MASK;
        /* PP0 lives in bit 61 of pte1; fold it into bit 2 of pp */
        pp = (pte1 & HPTE64_R_PP) | ((pte1 & HPTE64_R_PP0) >> 61);
        /* No execute if either noexec or guarded bits set */
        nx = (pte1 & HPTE64_R_N) || (pte1 & HPTE64_R_G);
        if (HPTE64_V_COMPARE(pte0, ctx->ptem)) {
            /* ctx->raddr != -1 means an earlier PTE already matched */
            if (ctx->raddr != (hwaddr)-1ULL) {
                /* all matches should have equal RPN, WIMG & PP */
                if ((ctx->raddr & mmask) != (pte1 & mmask)) {
                    qemu_log("Bad RPN/WIMG/PP\n");
                    return -3;
                }
            }
            /* Compute access rights */
            access = ppc_hash64_pp_check(ctx->key, pp, nx);
            /* Keep the matching PTE informations */
            ctx->raddr = pte1;
            ctx->prot = access;
            ret = ppc_hash64_check_prot(ctx->prot, rwx);
            if (ret == 0) {
                /* Access granted */
                LOG_MMU("PTE access granted !\n");
            } else {
                /* Access right violation */
                LOG_MMU("PTE access rejected\n");
            }
        }
    }

    return ret;
}
346 |
|
347 |
static int ppc_hash64_pte_update_flags(struct mmu_ctx_hash64 *ctx, |
348 |
target_ulong *pte1p, |
349 |
int ret, int rw) |
350 |
{ |
351 |
int store = 0; |
352 |
|
353 |
/* Update page flags */
|
354 |
if (!(*pte1p & HPTE64_R_R)) {
|
355 |
/* Update accessed flag */
|
356 |
*pte1p |= HPTE64_R_R; |
357 |
store = 1;
|
358 |
} |
359 |
if (!(*pte1p & HPTE64_R_C)) {
|
360 |
if (rw == 1 && ret == 0) { |
361 |
/* Update changed flag */
|
362 |
*pte1p |= HPTE64_R_C; |
363 |
store = 1;
|
364 |
} else {
|
365 |
/* Force page fault for first write access */
|
366 |
ctx->prot &= ~PAGE_WRITE; |
367 |
} |
368 |
} |
369 |
|
370 |
return store;
|
371 |
} |
372 |
|
373 |
/*
 * PTE table lookup: scan one PTE group for a match.
 *
 * @h selects primary (0) or secondary (1) hash from ctx->hash[].  On a
 * match the R/C flags are updated (and stored back if they changed).
 * For large pages the low real-address bits above 4K are filled from
 * @eaddr so the 4K-page software TLB can cache the chunk.
 *
 * Returns 0 on success, -1 when no PTE matches (or the group is
 * inconsistent), -2 on an access-rights violation.
 *
 * NOTE(review): control flow is deliberately odd — the `goto done`
 * jumps *into* the `if (good != -1)` body so the flag-update code is
 * shared between the early-exit and loop-completion paths.
 */
static int find_pte64(CPUPPCState *env, struct mmu_ctx_hash64 *ctx,
                      target_ulong eaddr, int h, int rwx, int target_page_bits)
{
    hwaddr pteg_off;
    target_ulong pte0, pte1;
    int i, good = -1;
    int ret, r;

    ret = -1; /* No entry found */
    pteg_off = (ctx->hash[h] * HASH_PTEG_SIZE_64) & env->htab_mask;
    for (i = 0; i < HPTES_PER_GROUP; i++) {
        pte0 = ppc_hash64_load_hpte0(env, pteg_off + i*HASH_PTE_SIZE_64);
        pte1 = ppc_hash64_load_hpte1(env, pteg_off + i*HASH_PTE_SIZE_64);

        r = pte64_check(ctx, pte0, pte1, h, rwx);
        LOG_MMU("Load pte from %016" HWADDR_PRIx " => " TARGET_FMT_lx " "
                TARGET_FMT_lx " %d %d %d " TARGET_FMT_lx "\n",
                pteg_off + (i * 16), pte0, pte1, (int)(pte0 & 1), h,
                (int)((pte0 >> 1) & 1), ctx->ptem);
        switch (r) {
        case -3:
            /* PTE inconsistency */
            return -1;
        case -2:
            /* Access violation: remember the slot but keep scanning */
            ret = -2;
            good = i;
            break;
        case -1:
        default:
            /* No PTE match */
            break;
        case 0:
            /* access granted */
            /* XXX: we should go on looping to check all PTEs consistency
             * but if we can speed-up the whole thing as the
             * result would be undefined if PTEs are not consistent.
             */
            ret = 0;
            good = i;
            goto done;
        }
    }
    if (good != -1) {
    done:
        LOG_MMU("found PTE at addr %08" HWADDR_PRIx " prot=%01x ret=%d\n",
                ctx->raddr, ctx->prot, ret);
        /* Update page flags */
        pte1 = ctx->raddr;
        if (ppc_hash64_pte_update_flags(ctx, &pte1, ret, rwx) == 1) {
            ppc_hash64_store_hpte1(env, pteg_off + good * HASH_PTE_SIZE_64, pte1);
        }
    }

    /* We have a TLB that saves 4K pages, so let's
     * split a huge page to 4k chunks */
    if (target_page_bits != TARGET_PAGE_BITS) {
        ctx->raddr |= (eaddr & ((1 << target_page_bits) - 1))
                      & TARGET_PAGE_MASK;
    }
    return ret;
}
436 |
|
437 |
/*
 * Translate effective address @eaddr for an access of type @rwx
 * (0 = read, 1 = write, 2 = execute), filling @ctx with the real
 * address and protection on success.
 *
 * Returns:
 *    0  success (ctx->raddr / ctx->prot valid)
 *   -1  no matching PTE in either hash bucket
 *   -2  access-rights violation
 *   -3  segment-level no-execute violation
 *   -5  no matching SLB entry (segment fault)
 */
static int ppc_hash64_translate(CPUPPCState *env, struct mmu_ctx_hash64 *ctx,
                                target_ulong eaddr, int rwx)
{
    hwaddr hash;
    target_ulong vsid;
    int pr, target_page_bits;
    int ret, ret2;
    ppc_slb_t *slb;
    target_ulong pageaddr;
    int segment_bits;

    /* 1. Handle real mode accesses */
    if (((rwx == 2) && (msr_ir == 0)) || ((rwx != 2) && (msr_dr == 0))) {
        /* Translation is off */
        /* In real mode the top 4 effective address bits are ignored */
        ctx->raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;
        ctx->prot = PAGE_READ | PAGE_EXEC | PAGE_WRITE;
        return 0;
    }

    /* 2. Translation is on, so look up the SLB */
    slb = slb_lookup(env, eaddr);

    if (!slb) {
        return -5;
    }

    /* 3. Check for segment level no-execute violation */
    if ((rwx == 2) && (slb->vsid & SLB_VSID_N)) {
        return -3;
    }

    pr = msr_pr;

    /* Segment size selects VSID position and segment width:
     * 1T segments use 40 EA bits, 256M segments use 28 */
    if (slb->vsid & SLB_VSID_B) {
        vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT_1T;
        segment_bits = 40;
    } else {
        vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT;
        segment_bits = 28;
    }

    /* L bit selects the large (16M) page size for this segment */
    target_page_bits = (slb->vsid & SLB_VSID_L)
        ? TARGET_PAGE_BITS_16M : TARGET_PAGE_BITS;
    /* Access key: KP in problem state, KS otherwise */
    ctx->key = !!(pr ? (slb->vsid & SLB_VSID_KP)
                  : (slb->vsid & SLB_VSID_KS));

    /* Page index bits of the EA within the segment */
    pageaddr = eaddr & ((1ULL << segment_bits)
                        - (1ULL << target_page_bits));
    if (slb->vsid & SLB_VSID_B) {
        hash = vsid ^ (vsid << 25) ^ (pageaddr >> target_page_bits);
    } else {
        hash = vsid ^ (pageaddr >> target_page_bits);
    }
    /* Only 5 bits of the page index are used in the AVPN */
    ctx->ptem = (slb->vsid & SLB_VSID_PTEM) |
        ((pageaddr >> 16) & ((1ULL << segment_bits) - 0x80));

    LOG_MMU("pte segment: key=%d nx %d vsid " TARGET_FMT_lx "\n",
            ctx->key, !!(slb->vsid & SLB_VSID_N), vsid);
    ret = -1;

    /* Page address translation */
    LOG_MMU("htab_base " TARGET_FMT_plx " htab_mask " TARGET_FMT_plx
            " hash " TARGET_FMT_plx "\n",
            env->htab_base, env->htab_mask, hash);
    ctx->hash[0] = hash;
    ctx->hash[1] = ~hash;

    /* Initialize real address with an invalid value */
    ctx->raddr = (hwaddr)-1ULL;
    LOG_MMU("0 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
            " vsid=" TARGET_FMT_lx " ptem=" TARGET_FMT_lx
            " hash=" TARGET_FMT_plx "\n",
            env->htab_base, env->htab_mask, vsid, ctx->ptem,
            ctx->hash[0]);
    /* Primary table lookup */
    ret = find_pte64(env, ctx, eaddr, 0, rwx, target_page_bits);
    if (ret < 0) {
        /* Secondary table lookup */
        LOG_MMU("1 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
                " vsid=" TARGET_FMT_lx " api=" TARGET_FMT_lx
                " hash=" TARGET_FMT_plx "\n", env->htab_base,
                env->htab_mask, vsid, ctx->ptem, ctx->hash[1]);
        ret2 = find_pte64(env, ctx, eaddr, 1, rwx, target_page_bits);
        /* Keep the secondary result unless it is "no entry" (-1) and
         * the primary already recorded a more specific failure */
        if (ret2 != -1) {
            ret = ret2;
        }
    }

    return ret;
}
529 |
|
530 |
/*
 * Debug (gdbstub/monitor) address translation: return the physical page
 * address for virtual address @addr, or -1 when no mapping exists.
 * Uses a read access (rwx == 0) and raises no guest exceptions.
 */
hwaddr ppc_hash64_get_phys_page_debug(CPUPPCState *env, target_ulong addr)
{
    struct mmu_ctx_hash64 ctx;
    int rc = ppc_hash64_translate(env, &ctx, addr, 0);

    if (rc != 0) {
        return -1;
    }
    return ctx.raddr & TARGET_PAGE_MASK;
}
540 |
|
541 |
/*
 * TLB-miss entry point: translate @address for an access of type @rwx
 * (0 = read, 1 = write, 2 = execute) and either install the mapping in
 * the software TLB or set up the appropriate interrupt state.
 *
 * Returns 0 when the page was mapped, 1 when an exception was queued
 * (env->exception_index / error_code, plus DAR/DSISR for data faults).
 */
int ppc_hash64_handle_mmu_fault(CPUPPCState *env, target_ulong address, int rwx,
                                int mmu_idx)
{
    struct mmu_ctx_hash64 ctx;
    int ret = 0;

    ret = ppc_hash64_translate(env, &ctx, address, rwx);
    if (ret == 0) {
        /* Translation succeeded: cache the 4K mapping */
        tlb_set_page(env, address & TARGET_PAGE_MASK,
                     ctx.raddr & TARGET_PAGE_MASK, ctx.prot,
                     mmu_idx, TARGET_PAGE_SIZE);
        ret = 0;
    } else if (ret < 0) {
        LOG_MMU_STATE(env);
        if (rwx == 2) {
            /* Instruction fetch faults -> ISI / ISEG with the cause
             * encoded in error_code (becomes SRR1 bits) */
            switch (ret) {
            case -1:
                /* No matching PTE */
                env->exception_index = POWERPC_EXCP_ISI;
                env->error_code = 0x40000000;
                break;
            case -2:
                /* Access rights violation */
                env->exception_index = POWERPC_EXCP_ISI;
                env->error_code = 0x08000000;
                break;
            case -3:
                /* No execute protection violation */
                env->exception_index = POWERPC_EXCP_ISI;
                env->error_code = 0x10000000;
                break;
            case -5:
                /* No match in segment table */
                env->exception_index = POWERPC_EXCP_ISEG;
                env->error_code = 0;
                break;
            }
        } else {
            /* Data access faults -> DSI / DSEG; the faulting address
             * goes to DAR and the cause to DSISR (bit 0x02000000
             * marks a store) */
            switch (ret) {
            case -1:
                /* No matches in page tables or TLB */
                env->exception_index = POWERPC_EXCP_DSI;
                env->error_code = 0;
                env->spr[SPR_DAR] = address;
                if (rwx == 1) {
                    env->spr[SPR_DSISR] = 0x42000000;
                } else {
                    env->spr[SPR_DSISR] = 0x40000000;
                }
                break;
            case -2:
                /* Access rights violation */
                env->exception_index = POWERPC_EXCP_DSI;
                env->error_code = 0;
                env->spr[SPR_DAR] = address;
                if (rwx == 1) {
                    env->spr[SPR_DSISR] = 0x0A000000;
                } else {
                    env->spr[SPR_DSISR] = 0x08000000;
                }
                break;
            case -5:
                /* No match in segment table */
                env->exception_index = POWERPC_EXCP_DSEG;
                env->error_code = 0;
                env->spr[SPR_DAR] = address;
                break;
            }
        }
#if 0
        printf("%s: set exception to %d %02x\n", __func__,
               env->exception, env->error_code);
#endif
        ret = 1;
    }

    return ret;
}