target-ppc/mmu-hash64.c @ 7c43bca0
/*
 *  PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU.
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *  Copyright (c) 2013 David Gibson, IBM Corporation
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "cpu.h"
#include "helper.h"
#include "sysemu/kvm.h"
#include "kvm_ppc.h"
#include "mmu-hash64.h"

//#define DEBUG_MMU
//#define DEBUG_SLB

#ifdef DEBUG_MMU
#  define LOG_MMU(...) qemu_log(__VA_ARGS__)
#  define LOG_MMU_STATE(cpu) log_cpu_state((cpu), 0)
#else
#  define LOG_MMU(...) do { } while (0)
#  define LOG_MMU_STATE(cpu) do { } while (0)
#endif

#ifdef DEBUG_SLB
#  define LOG_SLB(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_SLB(...) do { } while (0)
#endif

/*
 * Used to indicate whether we have allocated htab in the
 * host kernel
 */
bool kvmppc_kern_htab;
/*
 * SLB handling
 */

static ppc_slb_t *slb_lookup(CPUPPCState *env, target_ulong eaddr)
{
    uint64_t esid_256M, esid_1T;
    int n;

    LOG_SLB("%s: eaddr " TARGET_FMT_lx "\n", __func__, eaddr);

    esid_256M = (eaddr & SEGMENT_MASK_256M) | SLB_ESID_V;
    esid_1T = (eaddr & SEGMENT_MASK_1T) | SLB_ESID_V;
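
    /* Since the V (valid) bit is folded into both comparison values
     * above, an SLB entry with V clear can never match either of them. */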
    for (n = 0; n < env->slb_nr; n++) {
        ppc_slb_t *slb = &env->slb[n];

        LOG_SLB("%s: slot %d %016" PRIx64 " %016"
                    PRIx64 "\n", __func__, n, slb->esid, slb->vsid);
        /* We check for 1T matches on all MMUs here - if the MMU
         * doesn't have 1T segment support, we will have prevented 1T
         * entries from being inserted in the slbmte code. */
        if (((slb->esid == esid_256M) &&
             ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_256M))
            || ((slb->esid == esid_1T) &&
                ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_1T))) {
            return slb;
        }
    }

    return NULL;
}

void dump_slb(FILE *f, fprintf_function cpu_fprintf, CPUPPCState *env)
{
    int i;
    uint64_t slbe, slbv;

    cpu_synchronize_state(CPU(ppc_env_get_cpu(env)));

    cpu_fprintf(f, "SLB\tESID\t\t\tVSID\n");
    for (i = 0; i < env->slb_nr; i++) {
        slbe = env->slb[i].esid;
        slbv = env->slb[i].vsid;
        if (slbe == 0 && slbv == 0) {
            continue;
        }
        cpu_fprintf(f, "%d\t0x%016" PRIx64 "\t0x%016" PRIx64 "\n",
                    i, slbe, slbv);
    }
}

void helper_slbia(CPUPPCState *env)
{
    int n, do_invalidate;

    do_invalidate = 0;
    /* XXX: Warning: slbia never invalidates the first segment */
    for (n = 1; n < env->slb_nr; n++) {
        ppc_slb_t *slb = &env->slb[n];

        if (slb->esid & SLB_ESID_V) {
            slb->esid &= ~SLB_ESID_V;
            /* XXX: given the fact that segment size is 256 MB or 1TB,
             *      and we still don't have a tlb_flush_mask(env, n, mask)
             *      in QEMU, we just invalidate all TLBs
             */
            do_invalidate = 1;
        }
    }
    if (do_invalidate) {
        tlb_flush(env, 1);
    }
}

void helper_slbie(CPUPPCState *env, target_ulong addr)
{
    ppc_slb_t *slb;

    slb = slb_lookup(env, addr);
    if (!slb) {
        return;
    }

    if (slb->esid & SLB_ESID_V) {
        slb->esid &= ~SLB_ESID_V;

        /* XXX: given the fact that segment size is 256 MB or 1TB,
         *      and we still don't have a tlb_flush_mask(env, n, mask)
         *      in QEMU, we just invalidate all TLBs
         */
        tlb_flush(env, 1);
    }
}

int ppc_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs)
{
    int slot = rb & 0xfff;
    ppc_slb_t *slb = &env->slb[slot];
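
    /* Note: the single mask test below relies on slb_nr being a power
     * of two; (0x1000 - slb_nr) then has every bit of the 12-bit index
     * field from slb_nr upward set, so one test catches both reserved
     * index bits and out-of-range slot numbers. */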
    if (rb & (0x1000 - env->slb_nr)) {
        return -1; /* Reserved bits set or slot too high */
    }
    if (rs & (SLB_VSID_B & ~SLB_VSID_B_1T)) {
        return -1; /* Bad segment size */
    }
    if ((rs & SLB_VSID_B) && !(env->mmu_model & POWERPC_MMU_1TSEG)) {
        return -1; /* 1T segment on MMU that doesn't support it */
    }

    /* Mask out the slot number as we store the entry */
    slb->esid = rb & (SLB_ESID_ESID | SLB_ESID_V);
    slb->vsid = rs;

    LOG_SLB("%s: %d " TARGET_FMT_lx " - " TARGET_FMT_lx " => %016" PRIx64
            " %016" PRIx64 "\n", __func__, slot, rb, rs,
            slb->esid, slb->vsid);

    return 0;
}

static int ppc_load_slb_esid(CPUPPCState *env, target_ulong rb,
                             target_ulong *rt)
{
    int slot = rb & 0xfff;
    ppc_slb_t *slb = &env->slb[slot];

    if (slot >= env->slb_nr) {
        return -1;
    }

    *rt = slb->esid;
    return 0;
}

static int ppc_load_slb_vsid(CPUPPCState *env, target_ulong rb,
                             target_ulong *rt)
{
    int slot = rb & 0xfff;
    ppc_slb_t *slb = &env->slb[slot];

    if (slot >= env->slb_nr) {
        return -1;
    }

    *rt = slb->vsid;
    return 0;
}

void helper_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs)
{
    if (ppc_store_slb(env, rb, rs) < 0) {
        helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL);
    }
}

target_ulong helper_load_slb_esid(CPUPPCState *env, target_ulong rb)
{
    target_ulong rt = 0;

    if (ppc_load_slb_esid(env, rb, &rt) < 0) {
        helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL);
    }
    return rt;
}

target_ulong helper_load_slb_vsid(CPUPPCState *env, target_ulong rb)
{
    target_ulong rt = 0;

    if (ppc_load_slb_vsid(env, rb, &rt) < 0) {
        helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL);
    }
    return rt;
}

/*
 * 64-bit hash table MMU handling
 */

static int ppc_hash64_pte_prot(CPUPPCState *env,
                               ppc_slb_t *slb, ppc_hash_pte64_t pte)
{
    unsigned pp, key;
    /* Some pp bit combinations have undefined behaviour, so default
     * to no access in those cases */
    int prot = 0;
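
    /* The SLB supplies two key bits: Kp applies in problem (user)
     * state, Ks in privileged state; the selected bit is the "key"
     * that picks a row of the PP protection tables below. */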
    key = !!(msr_pr ? (slb->vsid & SLB_VSID_KP)
             : (slb->vsid & SLB_VSID_KS));
    pp = (pte.pte1 & HPTE64_R_PP) | ((pte.pte1 & HPTE64_R_PP0) >> 61);

    if (key == 0) {
        switch (pp) {
        case 0x0:
        case 0x1:
        case 0x2:
            prot = PAGE_READ | PAGE_WRITE;
            break;

        case 0x3:
        case 0x6:
            prot = PAGE_READ;
            break;
        }
    } else {
        switch (pp) {
        case 0x0:
        case 0x6:
            prot = 0;
            break;

        case 0x1:
        case 0x3:
            prot = PAGE_READ;
            break;

        case 0x2:
            prot = PAGE_READ | PAGE_WRITE;
            break;
        }
    }

    /* No execute if either noexec or guarded bits set */
    if (!((pte.pte1 & HPTE64_R_N) || (pte.pte1 & HPTE64_R_G)
          || (slb->vsid & SLB_VSID_N))) {
        prot |= PAGE_EXEC;
    }

    return prot;
}

static int ppc_hash64_amr_prot(CPUPPCState *env, ppc_hash_pte64_t pte)
{
    int key, amrbits;
    int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

    /* Only recent MMUs implement Virtual Page Class Key Protection */
    if (!(env->mmu_model & POWERPC_MMU_AMR)) {
        return prot;
    }
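
    /* The AMR holds a 2-bit field per class key, with key 0 in the two
     * most-significant bits, hence the shift by 2 * (31 - key) below. */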
    key = HPTE64_R_KEY(pte.pte1);
    amrbits = (env->spr[SPR_AMR] >> 2 * (31 - key)) & 0x3;

    /* fprintf(stderr, "AMR protection: key=%d AMR=0x%" PRIx64 "\n", key, */
    /*         env->spr[SPR_AMR]); */

    /*
     * A store is permitted if the AMR bit is 0. Remove write
     * protection if it is set.
     */
    if (amrbits & 0x2) {
        prot &= ~PAGE_WRITE;
    }
    /*
     * A load is permitted if the AMR bit is 0. Remove read
     * protection if it is set.
     */
    if (amrbits & 0x1) {
        prot &= ~PAGE_READ;
    }

    return prot;
}

uint64_t ppc_hash64_start_access(PowerPCCPU *cpu, target_ulong pte_index)
{
    uint64_t token = 0;
    hwaddr pte_offset;

    pte_offset = pte_index * HASH_PTE_SIZE_64;
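    /* Each hash PTE occupies HASH_PTE_SIZE_64 (16) bytes, so this is
     * the byte offset of the PTE index within the hash table. */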
    if (kvmppc_kern_htab) {
        /*
         * HTAB is controlled by KVM. Fetch the PTEG into a new buffer.
         */
        token = kvmppc_hash64_read_pteg(cpu, pte_index);
        if (token) {
            return token;
        }
        /*
         * pteg read failed, even though we have allocated htab via
         * kvmppc_reset_htab.
         */
        return 0;
    }
    /*
     * HTAB is controlled by QEMU. Just point to the internally
     * accessible PTEG.
     */
    if (cpu->env.external_htab) {
        token = (uint64_t)(uintptr_t) cpu->env.external_htab + pte_offset;
    } else if (cpu->env.htab_base) {
        token = cpu->env.htab_base + pte_offset;
    }
    return token;
}

void ppc_hash64_stop_access(uint64_t token)
{
    if (kvmppc_kern_htab) {
        kvmppc_hash64_free_pteg(token);
    }
}

static hwaddr ppc_hash64_pteg_search(CPUPPCState *env, hwaddr hash,
                                     bool secondary, target_ulong ptem,
                                     ppc_hash_pte64_t *pte)
{
    int i;
    uint64_t token;
    target_ulong pte0, pte1;
    target_ulong pte_index;

    pte_index = (hash & env->htab_mask) * HPTES_PER_GROUP;
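    /* htab_mask is the mask on the PTEG index, so this selects a group
     * of HPTES_PER_GROUP (8) PTEs, which are scanned linearly below. */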
    token = ppc_hash64_start_access(ppc_env_get_cpu(env), pte_index);
    if (!token) {
        return -1;
    }
    for (i = 0; i < HPTES_PER_GROUP; i++) {
        pte0 = ppc_hash64_load_hpte0(env, token, i);
        pte1 = ppc_hash64_load_hpte1(env, token, i);

        if ((pte0 & HPTE64_V_VALID)
            && (secondary == !!(pte0 & HPTE64_V_SECONDARY))
            && HPTE64_V_COMPARE(pte0, ptem)) {
            pte->pte0 = pte0;
            pte->pte1 = pte1;
            ppc_hash64_stop_access(token);
            return (pte_index + i) * HASH_PTE_SIZE_64;
        }
    }
    ppc_hash64_stop_access(token);
    /*
     * We didn't find a valid entry.
     */
    return -1;
}

static hwaddr ppc_hash64_htab_lookup(CPUPPCState *env,
                                     ppc_slb_t *slb, target_ulong eaddr,
                                     ppc_hash_pte64_t *pte)
{
    hwaddr pte_offset;
    hwaddr hash;
    uint64_t vsid, epnshift, epnmask, epn, ptem;

    /* Page size according to the SLB, which we use to generate the
     * EPN for hash table lookup.  When we implement more recent MMU
     * extensions this might be different from the actual page size
     * encoded in the PTE */
    epnshift = (slb->vsid & SLB_VSID_L)
        ? TARGET_PAGE_BITS_16M : TARGET_PAGE_BITS;
    epnmask = ~((1ULL << epnshift) - 1);

    if (slb->vsid & SLB_VSID_B) {
        /* 1TB segment */
        vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT_1T;
        epn = (eaddr & ~SEGMENT_MASK_1T) & epnmask;
        hash = vsid ^ (vsid << 25) ^ (epn >> epnshift);
    } else {
        /* 256M segment */
        vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT;
        epn = (eaddr & ~SEGMENT_MASK_256M) & epnmask;
        hash = vsid ^ (epn >> epnshift);
    }
    ptem = (slb->vsid & SLB_VSID_PTEM) | ((epn >> 16) & HPTE64_V_AVPN);

    /* Page address translation */
    LOG_MMU("htab_base " TARGET_FMT_plx " htab_mask " TARGET_FMT_plx
            " hash " TARGET_FMT_plx "\n",
            env->htab_base, env->htab_mask, hash);

    /* Primary PTEG lookup */
    LOG_MMU("0 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
            " vsid=" TARGET_FMT_lx " ptem=" TARGET_FMT_lx
            " hash=" TARGET_FMT_plx "\n",
            env->htab_base, env->htab_mask, vsid, ptem, hash);
    pte_offset = ppc_hash64_pteg_search(env, hash, 0, ptem, pte);
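
    /* The secondary hash is the ones' complement of the primary hash;
     * entries hashed there have HPTE64_V_SECONDARY set in pte0, which
     * the search above matches against the 'secondary' flag. */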
    if (pte_offset == -1) {
        /* Secondary PTEG lookup */
        LOG_MMU("1 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
                " vsid=" TARGET_FMT_lx " ptem=" TARGET_FMT_lx
                " hash=" TARGET_FMT_plx "\n", env->htab_base,
                env->htab_mask, vsid, ptem, ~hash);

        pte_offset = ppc_hash64_pteg_search(env, ~hash, 1, ptem, pte);
    }

    return pte_offset;
}

static hwaddr ppc_hash64_pte_raddr(ppc_slb_t *slb, ppc_hash_pte64_t pte,
                                   target_ulong eaddr)
{
    hwaddr rpn = pte.pte1 & HPTE64_R_RPN;
    /* FIXME: Add support for SLLP extended page sizes */
    int target_page_bits = (slb->vsid & SLB_VSID_L)
        ? TARGET_PAGE_BITS_16M : TARGET_PAGE_BITS;
    hwaddr mask = (1ULL << target_page_bits) - 1;
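
    /* e.g. with 4 KiB pages mask == 0xfff: the high bits come from the
     * PTE's RPN field, the page offset from the effective address. */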
    return (rpn & ~mask) | (eaddr & mask);
}

int ppc_hash64_handle_mmu_fault(CPUPPCState *env, target_ulong eaddr,
                                int rwx, int mmu_idx)
{
    ppc_slb_t *slb;
    hwaddr pte_offset;
    ppc_hash_pte64_t pte;
    int pp_prot, amr_prot, prot;
    uint64_t new_pte1;
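    /* rwx is 0 for a load, 1 for a store, 2 for an instruction fetch
     * (cf. the assert below); need_prot[rwx] is the permission the
     * access requires. */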
    const int need_prot[] = {PAGE_READ, PAGE_WRITE, PAGE_EXEC};
    hwaddr raddr;

    assert((rwx == 0) || (rwx == 1) || (rwx == 2));

    /* 1. Handle real mode accesses */
    if (((rwx == 2) && (msr_ir == 0)) || ((rwx != 2) && (msr_dr == 0))) {
        /* Translation is off */
        /* In real mode the top 4 effective address bits are ignored */
        raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;
        tlb_set_page(env, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                     PAGE_READ | PAGE_WRITE | PAGE_EXEC, mmu_idx,
                     TARGET_PAGE_SIZE);
        return 0;
    }

    /* 2. Translation is on, so look up the SLB */
    slb = slb_lookup(env, eaddr);

    if (!slb) {
        if (rwx == 2) {
            env->exception_index = POWERPC_EXCP_ISEG;
            env->error_code = 0;
        } else {
            env->exception_index = POWERPC_EXCP_DSEG;
            env->error_code = 0;
            env->spr[SPR_DAR] = eaddr;
        }
        return 1;
    }

    /* 3. Check for segment level no-execute violation */
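    /* (For ISI, error_code is merged into SRR1 when the interrupt is
     * delivered; 0x10000000 marks a fetch from no-execute storage.) */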
    if ((rwx == 2) && (slb->vsid & SLB_VSID_N)) {
        env->exception_index = POWERPC_EXCP_ISI;
        env->error_code = 0x10000000;
        return 1;
    }

    /* 4. Locate the PTE in the hash table */
    pte_offset = ppc_hash64_htab_lookup(env, slb, eaddr, &pte);
    if (pte_offset == -1) {
        if (rwx == 2) {
            env->exception_index = POWERPC_EXCP_ISI;
            env->error_code = 0x40000000;
        } else {
            env->exception_index = POWERPC_EXCP_DSI;
            env->error_code = 0;
            env->spr[SPR_DAR] = eaddr;
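            /* DSISR 0x40000000: no matching HPTE was found;
             * 0x02000000 is also set when the access was a store. */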
            if (rwx == 1) {
                env->spr[SPR_DSISR] = 0x42000000;
            } else {
                env->spr[SPR_DSISR] = 0x40000000;
            }
        }
        return 1;
    }
    LOG_MMU("found PTE at offset %08" HWADDR_PRIx "\n", pte_offset);

    /* 5. Check access permissions */

    pp_prot = ppc_hash64_pte_prot(env, slb, pte);
    amr_prot = ppc_hash64_amr_prot(env, pte);
    prot = pp_prot & amr_prot;

    if ((need_prot[rwx] & ~prot) != 0) {
        /* Access right violation */
        LOG_MMU("PTE access rejected\n");
        if (rwx == 2) {
            env->exception_index = POWERPC_EXCP_ISI;
            env->error_code = 0x08000000;
        } else {
            target_ulong dsisr = 0;

            env->exception_index = POWERPC_EXCP_DSI;
            env->error_code = 0;
            env->spr[SPR_DAR] = eaddr;
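            /* DSISR: 0x08000000 = PP protection violation,
             * 0x02000000 = access was a store, 0x00200000 = virtual
             * page class key (AMR) violation. */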
            if (need_prot[rwx] & ~pp_prot) {
                dsisr |= 0x08000000;
            }
            if (rwx == 1) {
                dsisr |= 0x02000000;
            }
            if (need_prot[rwx] & ~amr_prot) {
                dsisr |= 0x00200000;
            }
            env->spr[SPR_DSISR] = dsisr;
        }
        return 1;
    }

    LOG_MMU("PTE access granted!\n");

    /* 6. Update PTE referenced and changed bits if necessary */

    new_pte1 = pte.pte1 | HPTE64_R_R; /* set referenced bit */
    if (rwx == 1) {
        new_pte1 |= HPTE64_R_C; /* set changed (dirty) bit */
    } else {
        /* Treat the page as read-only for now, so that a later write
         * will pass through this function again to set the C bit */
        prot &= ~PAGE_WRITE;
    }

    if (new_pte1 != pte.pte1) {
        ppc_hash64_store_hpte1(env, pte_offset, new_pte1);
    }

    /* 7. Determine the real address from the PTE */

    raddr = ppc_hash64_pte_raddr(slb, pte, eaddr);

    tlb_set_page(env, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                 prot, mmu_idx, TARGET_PAGE_SIZE);

    return 0;
}

hwaddr ppc_hash64_get_phys_page_debug(CPUPPCState *env, target_ulong addr)
{
    ppc_slb_t *slb;
    hwaddr pte_offset;
    ppc_hash_pte64_t pte;

    if (msr_dr == 0) {
        /* In real mode the top 4 effective address bits are ignored */
        return addr & 0x0FFFFFFFFFFFFFFFULL;
    }

    slb = slb_lookup(env, addr);
    if (!slb) {
        return -1;
    }

    pte_offset = ppc_hash64_htab_lookup(env, slb, addr, &pte);
    if (pte_offset == -1) {
        return -1;
    }

    return ppc_hash64_pte_raddr(slb, pte, addr) & TARGET_PAGE_MASK;
}