root / target-ppc / mmu-hash64.c @ cb446eca
History | View | Annotate | Download (15.4 kB)
/*
 * PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 * Copyright (c) 2013 David Gibson, IBM Corporation
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
20 |
#include "cpu.h" |
21 |
#include "helper.h" |
22 |
#include "sysemu/kvm.h" |
23 |
#include "kvm_ppc.h" |
24 |
#include "mmu-hash64.h" |
25 |
|
26 |
/* Uncomment to enable verbose MMU / SLB tracing via qemu_log() */
//#define DEBUG_MMU
//#define DEBUG_SLB

/* Debug logging helpers: these expand to nothing unless the matching
 * DEBUG_* switch above is defined, so they cost nothing in normal builds. */
#ifdef DEBUG_MMU
# define LOG_MMU(...) qemu_log(__VA_ARGS__)
# define LOG_MMU_STATE(env) log_cpu_state((env), 0)
#else
# define LOG_MMU(...) do { } while (0)
# define LOG_MMU_STATE(...) do { } while (0)
#endif

#ifdef DEBUG_SLB
# define LOG_SLB(...) qemu_log(__VA_ARGS__)
#else
# define LOG_SLB(...) do { } while (0)
#endif
|
42 |
|
43 |
/*
|
44 |
* SLB handling
|
45 |
*/
|
46 |
|
47 |
/* Find the valid SLB entry translating effective address @eaddr, or
 * NULL if no entry matches. */
static ppc_slb_t *slb_lookup(CPUPPCState *env, target_ulong eaddr)
{
    uint64_t match_256M, match_1T;
    int i;

    LOG_SLB("%s: eaddr " TARGET_FMT_lx "\n", __func__, eaddr);

    /* Candidate ESID values (with the valid bit set) for each of the
     * two possible segment sizes */
    match_256M = (eaddr & SEGMENT_MASK_256M) | SLB_ESID_V;
    match_1T = (eaddr & SEGMENT_MASK_1T) | SLB_ESID_V;

    for (i = 0; i < env->slb_nr; i++) {
        ppc_slb_t *slb = &env->slb[i];
        uint64_t seg_size = slb->vsid & SLB_VSID_B;

        LOG_SLB("%s: slot %d %016" PRIx64 " %016"
                PRIx64 "\n", __func__, i, slb->esid, slb->vsid);
        /* We check for 1T matches on all MMUs here - if the MMU
         * doesn't have 1T segment support, we will have prevented 1T
         * entries from being inserted in the slbmte code. */
        if ((seg_size == SLB_VSID_B_256M && slb->esid == match_256M)
            || (seg_size == SLB_VSID_B_1T && slb->esid == match_1T)) {
            return slb;
        }
    }

    return NULL;
}
75 |
|
76 |
/* Print all in-use SLB entries to @f (monitor "info tlb"-style dump). */
void dump_slb(FILE *f, fprintf_function cpu_fprintf, CPUPPCState *env)
{
    int slot;

    /* Pull the current register state from KVM (if in use) first */
    cpu_synchronize_state(CPU(ppc_env_get_cpu(env)));

    cpu_fprintf(f, "SLB\tESID\t\t\tVSID\n");
    for (slot = 0; slot < env->slb_nr; slot++) {
        uint64_t esid = env->slb[slot].esid;
        uint64_t vsid = env->slb[slot].vsid;

        /* Skip slots that have never been written */
        if (esid == 0 && vsid == 0) {
            continue;
        }
        cpu_fprintf(f, "%d\t0x%016" PRIx64 "\t0x%016" PRIx64 "\n",
                    slot, esid, vsid);
    }
}
94 |
|
95 |
/* Implement the slbia instruction: invalidate all SLB entries except
 * slot 0, flushing the softmmu TLB if any entry was actually dropped. */
void helper_slbia(CPUPPCState *env)
{
    bool need_flush = false;
    int slot;

    /* XXX: Warning: slbia never invalidates the first segment */
    for (slot = 1; slot < env->slb_nr; slot++) {
        ppc_slb_t *entry = &env->slb[slot];

        if (!(entry->esid & SLB_ESID_V)) {
            continue;
        }
        entry->esid &= ~SLB_ESID_V;
        /* XXX: given the fact that segment size is 256 MB or 1TB,
         * and we still don't have a tlb_flush_mask(env, n, mask)
         * in QEMU, we just invalidate all TLBs
         */
        need_flush = true;
    }
    if (need_flush) {
        tlb_flush(env, 1);
    }
}
117 |
|
118 |
/* Implement the slbie instruction: invalidate the SLB entry (if any)
 * covering effective address @addr. */
void helper_slbie(CPUPPCState *env, target_ulong addr)
{
    ppc_slb_t *entry = slb_lookup(env, addr);

    /* Nothing to do if no entry covers this address or the matching
     * entry is not valid */
    if (!entry || !(entry->esid & SLB_ESID_V)) {
        return;
    }

    entry->esid &= ~SLB_ESID_V;

    /* XXX: given the fact that segment size is 256 MB or 1TB,
     * and we still don't have a tlb_flush_mask(env, n, mask)
     * in QEMU, we just invalidate all TLBs
     */
    tlb_flush(env, 1);
}
137 |
|
138 |
/*
 * Store an SLB entry (slbmte): rb carries the ESID, valid bit and slot
 * number, rs carries the VSID and flags.
 *
 * Returns 0 on success, -1 if the operand encoding is invalid (reserved
 * bits set, slot out of range, or an unsupported segment size).
 */
int ppc_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs)
{
    int slot = rb & 0xfff;
    ppc_slb_t *slb;

    /* env->slb_nr is a power of two, so (0x1000 - slb_nr) is a mask
     * covering both the reserved rb bits and the slot bits that would
     * index past the end of the SLB array */
    if (rb & (0x1000 - env->slb_nr)) {
        return -1; /* Reserved bits set or slot too high */
    }
    if (rs & (SLB_VSID_B & ~SLB_VSID_B_1T)) {
        return -1; /* Bad segment size */
    }
    if ((rs & SLB_VSID_B) && !(env->mmu_model & POWERPC_MMU_1TSEG)) {
        return -1; /* 1T segment on MMU that doesn't support it */
    }

    /* Only index the SLB array once the slot number has been validated:
     * the raw field is 12 bits wide, so computing &env->slb[slot] first
     * could form an out-of-bounds pointer */
    slb = &env->slb[slot];

    /* Mask out the slot number as we store the entry */
    slb->esid = rb & (SLB_ESID_ESID | SLB_ESID_V);
    slb->vsid = rs;

    LOG_SLB("%s: %d " TARGET_FMT_lx " - " TARGET_FMT_lx " => %016" PRIx64
            " %016" PRIx64 "\n", __func__, slot, rb, rs,
            slb->esid, slb->vsid);

    return 0;
}
163 |
|
164 |
/* Read the ESID half of SLB slot (rb & 0xfff) into *rt (slbmfee).
 * Returns 0 on success, -1 if the slot number is out of range. */
static int ppc_load_slb_esid(CPUPPCState *env, target_ulong rb,
                             target_ulong *rt)
{
    int slot = rb & 0xfff;

    /* Validate the slot before indexing env->slb[]: the raw field is
     * 12 bits wide and may exceed the array bounds, so computing the
     * element address first would be out-of-bounds pointer arithmetic */
    if (slot >= env->slb_nr) {
        return -1;
    }

    *rt = env->slb[slot].esid;
    return 0;
}
177 |
|
178 |
/* Read the VSID half of SLB slot (rb & 0xfff) into *rt (slbmfev).
 * Returns 0 on success, -1 if the slot number is out of range. */
static int ppc_load_slb_vsid(CPUPPCState *env, target_ulong rb,
                             target_ulong *rt)
{
    int slot = rb & 0xfff;

    /* Validate the slot before indexing env->slb[]: the raw field is
     * 12 bits wide and may exceed the array bounds, so computing the
     * element address first would be out-of-bounds pointer arithmetic */
    if (slot >= env->slb_nr) {
        return -1;
    }

    *rt = env->slb[slot].vsid;
    return 0;
}
191 |
|
192 |
/* TCG helper for slbmte: store the entry, raising a program interrupt
 * on an invalid operand encoding. */
void helper_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs)
{
    int rc = ppc_store_slb(env, rb, rs);

    if (rc < 0) {
        helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL);
    }
}
199 |
|
200 |
/* TCG helper for slbmfee: return the ESID of the requested slot,
 * raising a program interrupt (and returning 0) on a bad slot. */
target_ulong helper_load_slb_esid(CPUPPCState *env, target_ulong rb)
{
    target_ulong result = 0;
    int rc = ppc_load_slb_esid(env, rb, &result);

    if (rc < 0) {
        helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL);
    }
    return result;
}
210 |
|
211 |
/* TCG helper for slbmfev: return the VSID of the requested slot,
 * raising a program interrupt (and returning 0) on a bad slot. */
target_ulong helper_load_slb_vsid(CPUPPCState *env, target_ulong rb)
{
    target_ulong result = 0;
    int rc = ppc_load_slb_vsid(env, rb, &result);

    if (rc < 0) {
        helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL);
    }
    return result;
}
221 |
|
222 |
/*
|
223 |
* 64-bit hash table MMU handling
|
224 |
*/
|
225 |
|
226 |
static int ppc_hash64_pte_prot(CPUPPCState *env, |
227 |
ppc_slb_t *slb, ppc_hash_pte64_t pte) |
228 |
{ |
229 |
unsigned pp, key;
|
230 |
/* Some pp bit combinations have undefined behaviour, so default
|
231 |
* to no access in those cases */
|
232 |
int prot = 0; |
233 |
|
234 |
key = !!(msr_pr ? (slb->vsid & SLB_VSID_KP) |
235 |
: (slb->vsid & SLB_VSID_KS)); |
236 |
pp = (pte.pte1 & HPTE64_R_PP) | ((pte.pte1 & HPTE64_R_PP0) >> 61);
|
237 |
|
238 |
if (key == 0) { |
239 |
switch (pp) {
|
240 |
case 0x0: |
241 |
case 0x1: |
242 |
case 0x2: |
243 |
prot = PAGE_READ | PAGE_WRITE; |
244 |
break;
|
245 |
|
246 |
case 0x3: |
247 |
case 0x6: |
248 |
prot = PAGE_READ; |
249 |
break;
|
250 |
} |
251 |
} else {
|
252 |
switch (pp) {
|
253 |
case 0x0: |
254 |
case 0x6: |
255 |
prot = 0;
|
256 |
break;
|
257 |
|
258 |
case 0x1: |
259 |
case 0x3: |
260 |
prot = PAGE_READ; |
261 |
break;
|
262 |
|
263 |
case 0x2: |
264 |
prot = PAGE_READ | PAGE_WRITE; |
265 |
break;
|
266 |
} |
267 |
} |
268 |
|
269 |
/* No execute if either noexec or guarded bits set */
|
270 |
if (!(pte.pte1 & HPTE64_R_N) || (pte.pte1 & HPTE64_R_G)
|
271 |
|| (slb->vsid & SLB_VSID_N)) { |
272 |
prot |= PAGE_EXEC; |
273 |
} |
274 |
|
275 |
return prot;
|
276 |
} |
277 |
|
278 |
/*
 * Compute the protection allowed by the Authority Mask Register for
 * the virtual page class key stored in the PTE.  AMR never restricts
 * execute access.
 */
static int ppc_hash64_amr_prot(CPUPPCState *env, ppc_hash_pte64_t pte)
{
    int key, amrbits;
    /* Start from full access; AMR bits can only *remove* permissions */
    int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

    /* Only recent MMUs implement Virtual Page Class Key Protection */
    if (!(env->mmu_model & POWERPC_MMU_AMR)) {
        return prot;
    }

    key = HPTE64_R_KEY(pte.pte1);
    /* Each key selects a 2-bit field of the AMR; key 0 is the most
     * significant pair */
    amrbits = (env->spr[SPR_AMR] >> 2*(31 - key)) & 0x3;

    /* fprintf(stderr, "AMR protection: key=%d AMR=0x%" PRIx64 "\n", key, */
    /*         env->spr[SPR_AMR]); */

    /* Per the Power ISA a *set* AMR bit means the access is prevented:
     * the high bit of the pair blocks writes, the low bit blocks reads.
     * The previous code had this inverted, granting access when the
     * bits were set. */
    if (amrbits & 0x2) {
        prot &= ~PAGE_WRITE;
    }
    if (amrbits & 0x1) {
        prot &= ~PAGE_READ;
    }

    return prot;
}
304 |
|
305 |
static hwaddr ppc_hash64_pteg_search(CPUPPCState *env, hwaddr pteg_off,
|
306 |
bool secondary, target_ulong ptem,
|
307 |
ppc_hash_pte64_t *pte) |
308 |
{ |
309 |
hwaddr pte_offset = pteg_off; |
310 |
target_ulong pte0, pte1; |
311 |
int i;
|
312 |
|
313 |
for (i = 0; i < HPTES_PER_GROUP; i++) { |
314 |
pte0 = ppc_hash64_load_hpte0(env, pte_offset); |
315 |
pte1 = ppc_hash64_load_hpte1(env, pte_offset); |
316 |
|
317 |
if ((pte0 & HPTE64_V_VALID)
|
318 |
&& (secondary == !!(pte0 & HPTE64_V_SECONDARY)) |
319 |
&& HPTE64_V_COMPARE(pte0, ptem)) { |
320 |
pte->pte0 = pte0; |
321 |
pte->pte1 = pte1; |
322 |
return pte_offset;
|
323 |
} |
324 |
|
325 |
pte_offset += HASH_PTE_SIZE_64; |
326 |
} |
327 |
|
328 |
return -1; |
329 |
} |
330 |
|
331 |
/*
 * Look up the hashed page table entry translating @eaddr under SLB
 * entry @slb.  Tries the primary PTEG first, then the secondary one.
 * On success, fills *pte and returns the PTE's offset within the hash
 * table; returns -1 if no matching PTE exists.
 */
static hwaddr ppc_hash64_htab_lookup(CPUPPCState *env,
                                     ppc_slb_t *slb, target_ulong eaddr,
                                     ppc_hash_pte64_t *pte)
{
    hwaddr pteg_off, pte_offset;
    hwaddr hash;
    uint64_t vsid, epnshift, epnmask, epn, ptem;

    /* Page size according to the SLB, which we use to generate the
     * EPN for hash table lookup.. When we implement more recent MMU
     * extensions this might be different from the actual page size
     * encoded in the PTE */
    epnshift = (slb->vsid & SLB_VSID_L)
        ? TARGET_PAGE_BITS_16M : TARGET_PAGE_BITS;
    epnmask = ~((1ULL << epnshift) - 1);

    /* The segment size selects both the VSID field layout and the
     * hash function (ISA-defined) */
    if (slb->vsid & SLB_VSID_B) {
        /* 1TB segment */
        vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT_1T;
        epn = (eaddr & ~SEGMENT_MASK_1T) & epnmask;
        hash = vsid ^ (vsid << 25) ^ (epn >> epnshift);
    } else {
        /* 256M segment */
        vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT;
        epn = (eaddr & ~SEGMENT_MASK_256M) & epnmask;
        hash = vsid ^ (epn >> epnshift);
    }
    /* Value the PTE's "abbreviated VPN" field must match */
    ptem = (slb->vsid & SLB_VSID_PTEM) | ((epn >> 16) & HPTE64_V_AVPN);

    /* Page address translation */
    LOG_MMU("htab_base " TARGET_FMT_plx " htab_mask " TARGET_FMT_plx
            " hash " TARGET_FMT_plx "\n",
            env->htab_base, env->htab_mask, hash);

    /* Primary PTEG lookup */
    LOG_MMU("0 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
            " vsid=" TARGET_FMT_lx " ptem=" TARGET_FMT_lx
            " hash=" TARGET_FMT_plx "\n",
            env->htab_base, env->htab_mask, vsid, ptem, hash);
    pteg_off = (hash * HASH_PTEG_SIZE_64) & env->htab_mask;
    pte_offset = ppc_hash64_pteg_search(env, pteg_off, 0, ptem, pte);

    if (pte_offset == -1) {
        /* Secondary PTEG lookup: hash complemented, SECONDARY bit set */
        LOG_MMU("1 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
                " vsid=" TARGET_FMT_lx " api=" TARGET_FMT_lx
                " hash=" TARGET_FMT_plx "\n", env->htab_base,
                env->htab_mask, vsid, ptem, ~hash);

        pteg_off = (~hash * HASH_PTEG_SIZE_64) & env->htab_mask;
        pte_offset = ppc_hash64_pteg_search(env, pteg_off, 1, ptem, pte);
    }

    return pte_offset;
}
386 |
|
387 |
static hwaddr ppc_hash64_pte_raddr(ppc_slb_t *slb, ppc_hash_pte64_t pte,
|
388 |
target_ulong eaddr) |
389 |
{ |
390 |
hwaddr rpn = pte.pte1 & HPTE64_R_RPN; |
391 |
/* FIXME: Add support for SLLP extended page sizes */
|
392 |
int target_page_bits = (slb->vsid & SLB_VSID_L)
|
393 |
? TARGET_PAGE_BITS_16M : TARGET_PAGE_BITS; |
394 |
hwaddr mask = (1ULL << target_page_bits) - 1; |
395 |
|
396 |
return (rpn & ~mask) | (eaddr & mask);
|
397 |
} |
398 |
|
399 |
/*
 * Handle a softmmu TLB miss for the 64-bit hash MMU.
 *
 * @eaddr is the guest effective address, @rwx encodes the access type
 * (0 = data read, 1 = data write, 2 = instruction fetch) and @mmu_idx
 * selects the softmmu TLB to fill.
 *
 * Returns 0 when translation succeeds (the mapping has been installed
 * via tlb_set_page()), or 1 when an exception was raised, with
 * env->exception_index / error_code and the relevant SPRs (DAR, DSISR)
 * already set up for the caller to deliver.
 */
int ppc_hash64_handle_mmu_fault(CPUPPCState *env, target_ulong eaddr,
                                int rwx, int mmu_idx)
{
    ppc_slb_t *slb;
    hwaddr pte_offset;
    ppc_hash_pte64_t pte;
    int pp_prot, amr_prot, prot;
    uint64_t new_pte1;
    /* Permission bit required for each rwx access type, indexed by rwx */
    const int need_prot[] = {PAGE_READ, PAGE_WRITE, PAGE_EXEC};
    hwaddr raddr;

    assert((rwx == 0) || (rwx == 1) || (rwx == 2));

    /* 1. Handle real mode accesses */
    if (((rwx == 2) && (msr_ir == 0)) || ((rwx != 2) && (msr_dr == 0))) {
        /* Translation is off */
        /* In real mode the top 4 effective address bits are ignored */
        raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;
        tlb_set_page(env, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                     PAGE_READ | PAGE_WRITE | PAGE_EXEC, mmu_idx,
                     TARGET_PAGE_SIZE);
        return 0;
    }

    /* 2. Translation is on, so look up the SLB */
    slb = slb_lookup(env, eaddr);

    if (!slb) {
        /* No SLB entry covers this address: segment interrupt */
        if (rwx == 2) {
            env->exception_index = POWERPC_EXCP_ISEG;
            env->error_code = 0;
        } else {
            env->exception_index = POWERPC_EXCP_DSEG;
            env->error_code = 0;
            env->spr[SPR_DAR] = eaddr;
        }
        return 1;
    }

    /* 3. Check for segment level no-execute violation */
    if ((rwx == 2) && (slb->vsid & SLB_VSID_N)) {
        env->exception_index = POWERPC_EXCP_ISI;
        env->error_code = 0x10000000;
        return 1;
    }

    /* 4. Locate the PTE in the hash table */
    pte_offset = ppc_hash64_htab_lookup(env, slb, eaddr, &pte);
    if (pte_offset == -1) {
        /* No PTE found: page-fault style ISI/DSI */
        if (rwx == 2) {
            env->exception_index = POWERPC_EXCP_ISI;
            env->error_code = 0x40000000;
        } else {
            env->exception_index = POWERPC_EXCP_DSI;
            env->error_code = 0;
            env->spr[SPR_DAR] = eaddr;
            /* DSISR bit 6 distinguishes store (0x42...) from load */
            if (rwx == 1) {
                env->spr[SPR_DSISR] = 0x42000000;
            } else {
                env->spr[SPR_DSISR] = 0x40000000;
            }
        }
        return 1;
    }
    LOG_MMU("found PTE at offset %08" HWADDR_PRIx "\n", pte_offset);

    /* 5. Check access permissions */

    pp_prot = ppc_hash64_pte_prot(env, slb, pte);
    amr_prot = ppc_hash64_amr_prot(env, pte);
    prot = pp_prot & amr_prot;

    if ((need_prot[rwx] & ~prot) != 0) {
        /* Access right violation */
        LOG_MMU("PTE access rejected\n");
        if (rwx == 2) {
            env->exception_index = POWERPC_EXCP_ISI;
            env->error_code = 0x08000000;
        } else {
            target_ulong dsisr = 0;

            env->exception_index = POWERPC_EXCP_DSI;
            env->error_code = 0;
            env->spr[SPR_DAR] = eaddr;
            /* Report separately whether the PP bits or the AMR caused
             * the denial, plus the store/load direction */
            if (need_prot[rwx] & ~pp_prot) {
                dsisr |= 0x08000000;
            }
            if (rwx == 1) {
                dsisr |= 0x02000000;
            }
            if (need_prot[rwx] & ~amr_prot) {
                dsisr |= 0x00200000;
            }
            env->spr[SPR_DSISR] = dsisr;
        }
        return 1;
    }

    LOG_MMU("PTE access granted !\n");

    /* 6. Update PTE referenced and changed bits if necessary */

    new_pte1 = pte.pte1 | HPTE64_R_R; /* set referenced bit */
    if (rwx == 1) {
        new_pte1 |= HPTE64_R_C; /* set changed (dirty) bit */
    } else {
        /* Treat the page as read-only for now, so that a later write
         * will pass through this function again to set the C bit */
        prot &= ~PAGE_WRITE;
    }

    /* Only write back when a bit actually changed, to avoid needless
     * hash table stores */
    if (new_pte1 != pte.pte1) {
        ppc_hash64_store_hpte1(env, pte_offset, new_pte1);
    }

    /* 7. Determine the real address from the PTE */

    raddr = ppc_hash64_pte_raddr(slb, pte, eaddr);

    tlb_set_page(env, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                 prot, mmu_idx, TARGET_PAGE_SIZE);

    return 0;
}
523 |
|
524 |
/* Debug (gdbstub/monitor) address translation: resolve @addr to a real
 * address without touching guest-visible state, or return -1 if the
 * address is not currently mapped. */
hwaddr ppc_hash64_get_phys_page_debug(CPUPPCState *env, target_ulong addr)
{
    ppc_slb_t *slb;
    ppc_hash_pte64_t pte;
    hwaddr pte_offset;

    /* With data translation off, addresses are real apart from the top
     * four effective address bits, which are ignored */
    if (msr_dr == 0) {
        return addr & 0x0FFFFFFFFFFFFFFFULL;
    }

    slb = slb_lookup(env, addr);
    if (slb == NULL) {
        return -1;
    }

    pte_offset = ppc_hash64_htab_lookup(env, slb, addr, &pte);
    if (pte_offset == -1) {
        return -1;
    }

    return ppc_hash64_pte_raddr(slb, pte, addr) & TARGET_PAGE_MASK;
}