Revision f43e3525

b/hw/spapr.c
52 52
                              sPAPREnvironment *spapr,
53 53
                              target_phys_addr_t initrd_base,
54 54
                              target_phys_addr_t initrd_size,
55
                              const char *kernel_cmdline)
55
                              const char *kernel_cmdline,
56
                              long hash_shift)
56 57
{
57 58
    void *fdt;
58 59
    uint64_t mem_reg_property[] = { 0, cpu_to_be64(ramsize) };
59 60
    uint32_t start_prop = cpu_to_be32(initrd_base);
60 61
    uint32_t end_prop = cpu_to_be32(initrd_base + initrd_size);
62
    uint32_t pft_size_prop[] = {0, cpu_to_be32(hash_shift)};
63
    char hypertas_prop[] = "hcall-pft\0hcall-term";
61 64
    int i;
62 65
    char *modelname;
63 66
    int ret;
......
145 148
         * full emu, for kvm we should copy it from the host */
146 149
        _FDT((fdt_property_cell(fdt, "clock-frequency", 1000000000)));
147 150
        _FDT((fdt_property_cell(fdt, "ibm,slb-size", env->slb_nr)));
151
        _FDT((fdt_property(fdt, "ibm,pft-size",
152
                           pft_size_prop, sizeof(pft_size_prop))));
148 153
        _FDT((fdt_property_string(fdt, "status", "okay")));
149 154
        _FDT((fdt_property(fdt, "64-bit", NULL, 0)));
150 155

  
......
160 165

  
161 166
    _FDT((fdt_end_node(fdt)));
162 167

  
168
    /* RTAS */
169
    _FDT((fdt_begin_node(fdt, "rtas")));
170

  
171
    _FDT((fdt_property(fdt, "ibm,hypertas-functions", hypertas_prop,
172
                       sizeof(hypertas_prop))));
173

  
174
    _FDT((fdt_end_node(fdt)));
175

  
163 176
    /* vdevice */
164 177
    _FDT((fdt_begin_node(fdt, "vdevice")));
165 178

  
......
208 221
                           const char *cpu_model)
209 222
{
210 223
    CPUState *envs[MAX_CPUS];
211
    void *fdt;
224
    void *fdt, *htab;
212 225
    int i;
213 226
    ram_addr_t ram_offset;
214 227
    target_phys_addr_t fdt_addr;
215 228
    uint32_t kernel_base, initrd_base;
216
    long kernel_size, initrd_size;
229
    long kernel_size, initrd_size, htab_size;
230
    long pteg_shift = 17;
217 231
    int fdt_size;
218 232

  
219 233
    spapr = qemu_malloc(sizeof(*spapr));
......
250 264
    ram_offset = qemu_ram_alloc(NULL, "ppc_spapr.ram", ram_size);
251 265
    cpu_register_physical_memory(0, ram_size, ram_offset);
252 266

  
267
    /* allocate hash page table.  For now we always make this 16mb,
268
     * later we should probably make it scale to the size of guest
269
     * RAM */
270
    htab_size = 1ULL << (pteg_shift + 7);
271
    htab = qemu_mallocz(htab_size);
272

  
273
    for (i = 0; i < smp_cpus; i++) {
274
        envs[i]->external_htab = htab;
275
        envs[i]->htab_base = -1;
276
        envs[i]->htab_mask = htab_size - 1;
277
    }
278

  
253 279
    spapr->vio_bus = spapr_vio_bus_init();
254 280

  
255 281
    for (i = 0; i < MAX_SERIAL_PORTS; i++) {
......
296 322

  
297 323
    /* Prepare the device tree */
298 324
    fdt = spapr_create_fdt(&fdt_size, ram_size, cpu_model, envs, spapr,
299
                           initrd_base, initrd_size, kernel_cmdline);
325
                           initrd_base, initrd_size, kernel_cmdline,
326
                           pteg_shift + 7);
300 327
    assert(fdt != NULL);
301 328

  
302 329
    cpu_physical_memory_write(fdt_addr, fdt, fdt_size);
b/hw/spapr_hcall.c
1 1
#include "sysemu.h"
2 2
#include "cpu.h"
3 3
#include "qemu-char.h"
4
#include "sysemu.h"
5
#include "qemu-char.h"
6
#include "exec-all.h"
4 7
#include "hw/spapr.h"
5 8

  
9
/* Number of HPTEs in one hash bucket (PTE group) */
#define HPTES_PER_GROUP 8

/* Layout of the first (V) doubleword of a hashed page table entry */
#define HPTE_V_SSIZE_SHIFT      62
#define HPTE_V_AVPN_SHIFT       7
#define HPTE_V_AVPN             0x3fffffffffffff80ULL
#define HPTE_V_AVPN_VAL(x)      (((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT)
#define HPTE_V_COMPARE(x, y)    (!(((x) ^ (y)) & 0xffffffffffffff80UL))
#define HPTE_V_BOLTED           0x0000000000000010ULL
#define HPTE_V_LOCK             0x0000000000000008ULL
#define HPTE_V_LARGE            0x0000000000000004ULL
#define HPTE_V_SECONDARY        0x0000000000000002ULL
#define HPTE_V_VALID            0x0000000000000001ULL

/* Layout of the second (R) doubleword of a hashed page table entry */
#define HPTE_R_PP0              0x8000000000000000ULL
#define HPTE_R_TS               0x4000000000000000ULL
#define HPTE_R_KEY_HI           0x3000000000000000ULL
#define HPTE_R_RPN_SHIFT        12
#define HPTE_R_RPN              0x3ffffffffffff000ULL
#define HPTE_R_FLAGS            0x00000000000003ffULL
#define HPTE_R_PP               0x0000000000000003ULL
#define HPTE_R_N                0x0000000000000004ULL
#define HPTE_R_G                0x0000000000000008ULL
#define HPTE_R_M                0x0000000000000010ULL
#define HPTE_R_I                0x0000000000000020ULL
#define HPTE_R_W                0x0000000000000040ULL
#define HPTE_R_WIMG             0x0000000000000078ULL
#define HPTE_R_C                0x0000000000000080ULL
#define HPTE_R_R                0x0000000000000100ULL
#define HPTE_R_KEY_LO           0x0000000000000e00ULL

#define HPTE_V_1TB_SEG          0x4000000000000000ULL
#define HPTE_V_VRMA_MASK        0x4001ffffff000000ULL

/* Hypervisor-private software lock bit, kept in the V doubleword
 * (a bit the architecture leaves unused in valid entries) */
#define HPTE_V_HVLOCK           0x40ULL
43

  
44
/*
 * Try to take the software lock on an HPTE.
 *
 * Returns 0 without locking if any of @bits is already set in the
 * entry's V doubleword (callers pass HPTE_V_HVLOCK, optionally with
 * HPTE_V_VALID to claim only a free slot); otherwise sets
 * HPTE_V_HVLOCK and returns 1.
 */
static inline int lock_hpte(void *hpte, target_ulong bits)
{
    uint64_t pte0 = ldq_p(hpte);

    /* qemu's global lock serializes us, so no atomic op is needed */
    if (pte0 & bits) {
        return 0;
    }
    stq_p(hpte, pte0 | HPTE_V_HVLOCK);
    return 1;
}
57

  
58
/*
 * Build the RB operand for a tlbie instruction from an HPTE's V and R
 * doublewords plus the entry's index in the hash table (which encodes
 * the low hash bits needed to reconstruct the virtual address).
 */
static target_ulong compute_tlbie_rb(target_ulong v, target_ulong r,
                                     target_ulong pte_index)
{
    target_ulong rb, va;

    rb = (v & ~0x7fULL) << 16;          /* AVA field */
    rb |= (v >> 54) & 0x300;            /* B field (segment size) */

    /* Recover the low VA bits from the PTE group index */
    va = pte_index >> 3;
    if (v & HPTE_V_SECONDARY) {
        va = ~va;
    }
    /* xor vsid from AVA */
    va ^= (v & HPTE_V_1TB_SEG) ? (v >> 24) : (v >> 12);
    va &= 0x7ff;

    if (v & HPTE_V_LARGE) {
        rb |= 1;                        /* L field */
#if 0 /* Disable that P7 specific bit for now */
        if (r & 0xff000) {
            /* non-16MB large page, must be 64k */
            /* (masks depend on page size) */
            rb |= 0x1000;               /* page encoding in LP field */
            rb |= (va & 0x7f) << 16;    /* 7b of VA in AVA/LP field */
            rb |= (va & 0xfe);          /* AVAL field */
        }
#endif
    } else {
        /* 4kB page */
        rb |= (va & 0x7ff) << 12;       /* remaining 11b of AVA */
    }
    return rb;
}
93

  
94
/*
 * H_ENTER hypercall: install a new entry in the externally stored
 * hash page table.
 *
 * args[0] = flags (H_EXACT selects an exact slot vs. any free slot in
 *           the group), args[1] = pte_index, args[2] = V doubleword,
 *           args[3] = R doubleword.  On success, args[0] returns the
 *           index of the slot actually used.
 */
static target_ulong h_enter(CPUState *env, sPAPREnvironment *spapr,
                            target_ulong opcode, target_ulong *args)
{
    target_ulong flags = args[0];
    target_ulong pte_index = args[1];
    target_ulong pteh = args[2];
    target_ulong ptel = args[3];
    target_ulong porder;
    target_ulong i, pa;
    uint8_t *hpte;

    /* only handle 4k and 16M pages for now */
    porder = 12;
    if (pteh & HPTE_V_LARGE) {
#if 0 /* We don't support 64k pages yet */
        if ((ptel & 0xf000) == 0x1000) {
            /* 64k page */
            porder = 16;
        } else
#endif
        if ((ptel & 0xff000) == 0) {
            /* 16M page */
            porder = 24;
            /* lowest AVA bit must be 0 for 16M pages */
            if (pteh & 0x80) {
                return H_PARAMETER;
            }
        } else {
            return H_PARAMETER;
        }
    }

    pa = ptel & HPTE_R_RPN;
    /* FIXME: bounds check the pa? (pa and porder are currently only
     * computed, not consumed) */

    /* Check WIMG: only Memory-coherence alone is accepted */
    if ((ptel & HPTE_R_WIMG) != HPTE_R_M) {
        return H_PARAMETER;
    }
    /* Clear the software bits (including HVLOCK) in the V word so the
     * final store below also releases the lock */
    pteh &= ~0x60ULL;

    if ((pte_index * HASH_PTE_SIZE_64) & ~env->htab_mask) {
        return H_PARAMETER;
    }
    if (likely((flags & H_EXACT) == 0)) {
        /* Caller doesn't care which slot: scan the whole PTE group */
        pte_index &= ~7ULL;
        hpte = env->external_htab + (pte_index * HASH_PTE_SIZE_64);
        for (i = 0; i < 8; i++, hpte += HASH_PTE_SIZE_64) {
            if (((ldq_p(hpte) & HPTE_V_VALID) == 0) &&
                lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID)) {
                break;
            }
        }
        if (i == 8) {
            return H_PTEG_FULL;
        }
    } else {
        /* Caller demands this exact slot */
        i = 0;
        hpte = env->external_htab + (pte_index * HASH_PTE_SIZE_64);
        if (!lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID)) {
            return H_PTEG_FULL;
        }
    }
    /* Write R first, then V: storing pteh (sw bits cleared) both
     * validates the entry and drops HVLOCK */
    stq_p(hpte + (HASH_PTE_SIZE_64/2), ptel);
    /* eieio();  FIXME: need some sort of barrier for smp? */
    stq_p(hpte, pteh);

    assert(!(ldq_p(hpte) & HPTE_V_HVLOCK));
    args[0] = pte_index + i;
    return H_SUCCESS;
}
166

  
167
/*
 * H_REMOVE hypercall: invalidate and return an existing HPTE.
 *
 * args[0] = flags (H_AVPN / H_ANDCOND select optional match
 * conditions), args[1] = pte_index, args[2] = avpn.  On success the
 * removed V and R doublewords come back in args[0]/args[1] and the
 * corresponding TLB entry is flushed.
 */
static target_ulong h_remove(CPUState *env, sPAPREnvironment *spapr,
                             target_ulong opcode, target_ulong *args)
{
    target_ulong flags = args[0];
    target_ulong pte_index = args[1];
    target_ulong avpn = args[2];
    uint8_t *hpte;
    target_ulong pte0, pte1, rb;

    if ((pte_index * HASH_PTE_SIZE_64) & ~env->htab_mask) {
        return H_PARAMETER;
    }

    hpte = env->external_htab + (pte_index * HASH_PTE_SIZE_64);
    while (!lock_hpte(hpte, HPTE_V_HVLOCK)) {
        /* We have no real concurrency in qemu soft-emulation, so we
         * will never actually have a contested lock */
        assert(0);
    }

    pte0 = ldq_p(hpte);
    pte1 = ldq_p(hpte + (HASH_PTE_SIZE_64/2));

    /* Reject: invalid entry, AVPN mismatch, or ANDCOND bits set */
    if ((pte0 & HPTE_V_VALID) == 0
        || ((flags & H_AVPN) && (pte0 & ~0x7fULL) != avpn)
        || ((flags & H_ANDCOND) && (pte0 & avpn) != 0)) {
        stq_p(hpte, pte0 & ~HPTE_V_HVLOCK);     /* unlock and bail */
        assert(!(ldq_p(hpte) & HPTE_V_HVLOCK));
        return H_NOT_FOUND;
    }
    args[0] = pte0 & ~HPTE_V_HVLOCK;    /* hand back the old entry */
    args[1] = pte1;
    stq_p(hpte, 0);                     /* zero V: removes and unlocks */
    rb = compute_tlbie_rb(pte0, pte1, pte_index);
    ppc_tlb_invalidate_one(env, rb);
    assert(!(ldq_p(hpte) & HPTE_V_HVLOCK));
    return H_SUCCESS;
}
205

  
206
/*
 * H_PROTECT hypercall: change the protection/key bits of an existing
 * HPTE.
 *
 * args[0] = flags carrying the new PP/N/KEY bits (plus optional
 * H_AVPN match), args[1] = pte_index, args[2] = avpn.  The entry is
 * temporarily invalidated and its TLB entry flushed before the
 * updated R doubleword is written back.
 */
static target_ulong h_protect(CPUState *env, sPAPREnvironment *spapr,
                              target_ulong opcode, target_ulong *args)
{
    target_ulong flags = args[0];
    target_ulong pte_index = args[1];
    target_ulong avpn = args[2];
    uint8_t *hpte;
    target_ulong pte0, pte1, rb;

    if ((pte_index * HASH_PTE_SIZE_64) & ~env->htab_mask) {
        return H_PARAMETER;
    }

    hpte = env->external_htab + (pte_index * HASH_PTE_SIZE_64);
    while (!lock_hpte(hpte, HPTE_V_HVLOCK)) {
        /* We have no real concurrency in qemu soft-emulation, so we
         * will never actually have a contested lock */
        assert(0);
    }

    pte0 = ldq_p(hpte);
    pte1 = ldq_p(hpte + (HASH_PTE_SIZE_64/2));

    if ((pte0 & HPTE_V_VALID) == 0
        || ((flags & H_AVPN) && (pte0 & ~0x7fULL) != avpn)) {
        stq_p(hpte, pte0 & ~HPTE_V_HVLOCK);
        assert(!(ldq_p(hpte) & HPTE_V_HVLOCK));
        return H_NOT_FOUND;
    }

    /* Splice the new protection and key bits out of flags */
    pte1 &= ~(HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N |
              HPTE_R_KEY_HI | HPTE_R_KEY_LO);
    pte1 |= (flags << 55) & HPTE_R_PP0;
    pte1 |= (flags << 48) & HPTE_R_KEY_HI;
    pte1 |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);
    rb = compute_tlbie_rb(pte0, pte1, pte_index);
    /* Invalidate, flush the TLB, write the new R word, then restore
     * V (the final store also drops HVLOCK) */
    stq_p(hpte, pte0 & ~HPTE_V_VALID);
    ppc_tlb_invalidate_one(env, rb);
    stq_p(hpte + (HASH_PTE_SIZE_64/2), pte1);
    /* Don't need a memory barrier, due to qemu's global lock */
    stq_p(hpte, pte0 & ~HPTE_V_HVLOCK);
    assert(!(ldq_p(hpte) & HPTE_V_HVLOCK));
    return H_SUCCESS;
}
250

  
6 251
spapr_hcall_fn hypercall_table[(MAX_HCALL_OPCODE / 4) + 1];
7 252

  
8 253
void spapr_register_hypercall(target_ulong opcode, spapr_hcall_fn fn)
......
39 284
    hcall_dprintf("Unimplemented hcall 0x" TARGET_FMT_lx "\n", opcode);
40 285
    return H_FUNCTION;
41 286
}
287

  
288
static void hypercall_init(void)
289
{
290
    /* hcall-pft */
291
    spapr_register_hypercall(H_ENTER, h_enter);
292
    spapr_register_hypercall(H_REMOVE, h_remove);
293
    spapr_register_hypercall(H_PROTECT, h_protect);
294
}
295
device_init(hypercall_init);
b/target-ppc/cpu.h
670 670
    target_phys_addr_t htab_base;
671 671
    target_phys_addr_t htab_mask;
672 672
    target_ulong sr[32];
673
    /* externally stored hash table */
674
    uint8_t *external_htab;
673 675
    /* BATs */
674 676
    int nb_BATs;
675 677
    target_ulong DBAT[2][8];
b/target-ppc/helper.c
589 589
    for (i = 0; i < 8; i++) {
590 590
#if defined(TARGET_PPC64)
591 591
        if (is_64b) {
592
            pte0 = ldq_phys(env->htab_base + pteg_off + (i * 16));
593
            pte1 = ldq_phys(env->htab_base + pteg_off + (i * 16) + 8);
592
            if (env->external_htab) {
593
                pte0 = ldq_p(env->external_htab + pteg_off + (i * 16));
594
                pte1 = ldq_p(env->external_htab + pteg_off + (i * 16) + 8);
595
            } else {
596
                pte0 = ldq_phys(env->htab_base + pteg_off + (i * 16));
597
                pte1 = ldq_phys(env->htab_base + pteg_off + (i * 16) + 8);
598
            }
594 599

  
595 600
            /* We have a TLB that saves 4K pages, so let's
596 601
             * split a huge page to 4k chunks */
......
606 611
        } else
607 612
#endif
608 613
        {
609
            pte0 = ldl_phys(env->htab_base + pteg_off + (i * 8));
610
            pte1 =  ldl_phys(env->htab_base + pteg_off + (i * 8) + 4);
614
            if (env->external_htab) {
615
                pte0 = ldl_p(env->external_htab + pteg_off + (i * 8));
616
                pte1 = ldl_p(env->external_htab + pteg_off + (i * 8) + 4);
617
            } else {
618
                pte0 = ldl_phys(env->htab_base + pteg_off + (i * 8));
619
                pte1 = ldl_phys(env->htab_base + pteg_off + (i * 8) + 4);
620
            }
611 621
            r = pte32_check(ctx, pte0, pte1, h, rw, type);
612 622
            LOG_MMU("Load pte from " TARGET_FMT_lx " => " TARGET_FMT_lx " "
613 623
                    TARGET_FMT_lx " %d %d %d " TARGET_FMT_lx "\n",
......
647 657
        if (pte_update_flags(ctx, &pte1, ret, rw) == 1) {
648 658
#if defined(TARGET_PPC64)
649 659
            if (is_64b) {
650
                stq_phys_notdirty(env->htab_base + pteg_off + (good * 16) + 8,
651
                                  pte1);
660
                if (env->external_htab) {
661
                    stq_p(env->external_htab + pteg_off + (good * 16) + 8,
662
                          pte1);
663
                } else {
664
                    stq_phys_notdirty(env->htab_base + pteg_off +
665
                                      (good * 16) + 8, pte1);
666
                }
652 667
            } else
653 668
#endif
654 669
            {
655
                stl_phys_notdirty(env->htab_base + pteg_off + (good * 8) + 4,
656
                                  pte1);
670
                if (env->external_htab) {
671
                    stl_p(env->external_htab + pteg_off + (good * 8) + 4,
672
                          pte1);
673
                } else {
674
                    stl_phys_notdirty(env->htab_base + pteg_off +
675
                                      (good * 8) + 4, pte1);
676
                }
657 677
            }
658 678
        }
659 679
    }

Also available in: Unified diff