Revision 4656e1f0 target-ppc/kvm.c

--- a/target-ppc/kvm.c
+++ b/target-ppc/kvm.c
 #include <sys/types.h>
 #include <sys/ioctl.h>
 #include <sys/mman.h>
+#include <sys/vfs.h>
 
 #include <linux/kvm.h>
 
......
     return 0;
 }
 
+
+#if defined(TARGET_PPC64)
+static void kvm_get_fallback_smmu_info(CPUPPCState *env,
+                                       struct kvm_ppc_smmu_info *info)
+{
+    memset(info, 0, sizeof(*info));
+
+    /* We don't have the new KVM_PPC_GET_SMMU_INFO ioctl, so we
+     * need to "guess" what the supported page sizes are.
+     *
+     * For that to work we make a few assumptions:
+     *
+     * - If KVM_CAP_PPC_GET_PVINFO is supported we are running "PR"
+     *   KVM which only supports 4K and 16M pages, but supports them
+     *   regardless of the backing store characteristics. We also don't
+     *   support 1T segments.
+     *
+     *   This is safe as, if HV KVM ever supports that capability or PR
+     *   KVM grows support for more page/segment sizes, those versions
+     *   will have implemented KVM_CAP_PPC_GET_SMMU_INFO and thus we
+     *   will not hit this fallback.
+     *
+     * - Else we are running HV KVM. This means we only support page
+     *   sizes that fit in the backing store. Additionally we only
+     *   advertise 64K pages if the processor is ARCH 2.06 and we assume
+     *   P7 encodings for the SLB and hash table. Here too, we assume
+     *   support for any newer processor will mean a kernel that
+     *   implements KVM_CAP_PPC_GET_SMMU_INFO and thus doesn't hit
+     *   this fallback.
+     */
+    if (kvm_check_extension(env->kvm_state, KVM_CAP_PPC_GET_PVINFO)) {
+        /* No flags */
+        info->flags = 0;
+        info->slb_size = 64;
+
+        /* Standard 4k base page size segment */
+        info->sps[0].page_shift = 12;
+        info->sps[0].slb_enc = 0;
+        info->sps[0].enc[0].page_shift = 12;
+        info->sps[0].enc[0].pte_enc = 0;
+
+        /* Standard 16M large page size segment */
+        info->sps[1].page_shift = 24;
+        info->sps[1].slb_enc = SLB_VSID_L;
+        info->sps[1].enc[0].page_shift = 24;
+        info->sps[1].enc[0].pte_enc = 0;
+    } else {
+        int i = 0;
+
+        /* HV KVM has backing store size restrictions */
+        info->flags = KVM_PPC_PAGE_SIZES_REAL;
+
+        if (env->mmu_model & POWERPC_MMU_1TSEG) {
+            info->flags |= KVM_PPC_1T_SEGMENTS;
+        }
+
+        if (env->mmu_model == POWERPC_MMU_2_06) {
+            info->slb_size = 32;
+        } else {
+            info->slb_size = 64;
+        }
+
+        /* Standard 4k base page size segment */
+        info->sps[i].page_shift = 12;
+        info->sps[i].slb_enc = 0;
+        info->sps[i].enc[0].page_shift = 12;
+        info->sps[i].enc[0].pte_enc = 0;
+        i++;
+
+        /* 64K on MMU 2.06 */
+        if (env->mmu_model == POWERPC_MMU_2_06) {
+            info->sps[i].page_shift = 16;
+            info->sps[i].slb_enc = 0x110;
+            info->sps[i].enc[0].page_shift = 16;
+            info->sps[i].enc[0].pte_enc = 1;
+            i++;
+        }
+
+        /* Standard 16M large page size segment */
+        info->sps[i].page_shift = 24;
+        info->sps[i].slb_enc = SLB_VSID_L;
+        info->sps[i].enc[0].page_shift = 24;
+        info->sps[i].enc[0].pte_enc = 0;
+    }
+}
+
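For reference, the tables this fallback ends up advertising in each mode, transcribed from the assignments above:

    /* PR KVM (KVM_CAP_PPC_GET_PVINFO present): flags = 0, slb_size = 64
     *   sps[0]: 4K  segment (slb_enc = 0)          -> 4K  pages (pte_enc = 0)
     *   sps[1]: 16M segment (slb_enc = SLB_VSID_L) -> 16M pages (pte_enc = 0)
     *
     * HV KVM on an MMU 2.06 (POWER7-class) host: flags =
     * KVM_PPC_PAGE_SIZES_REAL (plus KVM_PPC_1T_SEGMENTS when the CPU
     * model has it), slb_size = 32
     *   sps[0]: 4K  segment (slb_enc = 0)          -> 4K  pages (pte_enc = 0)
     *   sps[1]: 64K segment (slb_enc = 0x110)      -> 64K pages (pte_enc = 1)
     *   sps[2]: 16M segment (slb_enc = SLB_VSID_L) -> 16M pages (pte_enc = 0)
     */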
+static void kvm_get_smmu_info(CPUPPCState *env, struct kvm_ppc_smmu_info *info)
+{
+    int ret;
+
+    if (kvm_check_extension(env->kvm_state, KVM_CAP_PPC_GET_SMMU_INFO)) {
+        ret = kvm_vm_ioctl(env->kvm_state, KVM_PPC_GET_SMMU_INFO, info);
+        if (ret == 0) {
+            return;
+        }
+    }
+
+    kvm_get_fallback_smmu_info(env, info);
+}
+
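When debugging which table a given kernel (or the fallback) produced, a small printer helps. A sketch, assuming a linux/kvm.h that provides the SMMU info structures; dump_smmu_info is a hypothetical helper, not part of the patch:

    #include <stdio.h>
    #include <linux/kvm.h>

    /* Print every populated segment page size and its actual page-size
     * encodings from a kvm_ppc_smmu_info. A page_shift of 0 marks an
     * unused slot, matching how the structure is zeroed above. */
    static void dump_smmu_info(const struct kvm_ppc_smmu_info *info)
    {
        int i, j;

        printf("flags=0x%llx slb_size=%u\n",
               (unsigned long long)info->flags, info->slb_size);
        for (i = 0; i < KVM_PPC_PAGE_SIZES_MAX_SZ; i++) {
            const struct kvm_ppc_one_seg_page_size *sps = &info->sps[i];

            if (!sps->page_shift) {
                continue;   /* unused slot */
            }
            printf("  segment 2^%u (slb_enc=0x%x):",
                   sps->page_shift, sps->slb_enc);
            for (j = 0; j < KVM_PPC_PAGE_SIZES_MAX_SZ; j++) {
                if (sps->enc[j].page_shift) {
                    printf(" page 2^%u (pte_enc=0x%x)",
                           sps->enc[j].page_shift, sps->enc[j].pte_enc);
                }
            }
            printf("\n");
        }
    }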
+static long getrampagesize(void)
+{
+    struct statfs fs;
+    int ret;
+
+    if (!mem_path) {
+        /* guest RAM is backed by normal anonymous pages */
+        return getpagesize();
+    }
+
+    do {
+        ret = statfs(mem_path, &fs);
+    } while (ret != 0 && errno == EINTR);
+
+    if (ret != 0) {
+        fprintf(stderr, "Couldn't statfs() memory path: %s\n",
+                strerror(errno));
+        exit(1);
+    }
+
+#define HUGETLBFS_MAGIC       0x958458f6
+
+    if (fs.f_type != HUGETLBFS_MAGIC) {
+        /* Explicit mempath, but it's ordinary pages */
+        return getpagesize();
+    }
+
+    /* It's a hugepage, return the huge page size */
+    return fs.f_bsize;
+}
+
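The detection technique above relies on statfs() reporting hugetlbfs's magic in f_type and the hugepage size in f_bsize; it works as a standalone probe too. A minimal sketch assuming Linux; backing_pagesize is a hypothetical name:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/vfs.h>

    #define HUGETLBFS_MAGIC 0x958458f6

    /* Return the page size backing 'path': the hugepage size if it is
     * on hugetlbfs, the normal system page size otherwise. */
    static long backing_pagesize(const char *path)
    {
        struct statfs fs;
        int ret;

        /* statfs() may be interrupted by a signal; retry on EINTR */
        do {
            ret = statfs(path, &fs);
        } while (ret != 0 && errno == EINTR);

        if (ret != 0) {
            fprintf(stderr, "statfs(%s): %s\n", path, strerror(errno));
            return -1;
        }
        return fs.f_type == HUGETLBFS_MAGIC ? fs.f_bsize : getpagesize();
    }

    int main(int argc, char **argv)
    {
        printf("%ld\n", backing_pagesize(argc > 1 ? argv[1] : "/dev/shm"));
        return 0;
    }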
+static bool kvm_valid_page_size(uint32_t flags, long rampgsize, uint32_t shift)
+{
+    if (!(flags & KVM_PPC_PAGE_SIZES_REAL)) {
+        return true;
+    }
+
+    return (1ul << shift) <= rampgsize;
+}
+
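A few concrete values show what the predicate admits. A minimal sketch restating the check standalone (KVM_PPC_PAGE_SIZES_REAL comes from linux/kvm.h; a 64-bit long is assumed, as on ppc64 hosts):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <linux/kvm.h>

    /* Same check as kvm_valid_page_size above, standalone. */
    static bool valid_page_size(uint32_t flags, long rampgsize, uint32_t shift)
    {
        if (!(flags & KVM_PPC_PAGE_SIZES_REAL)) {
            return true;    /* no backing-store restriction (PR KVM) */
        }
        return (1ul << shift) <= rampgsize;
    }

    int main(void)
    {
        long ram = 1l << 24;    /* RAM backed by 16M hugepages */

        assert(valid_page_size(KVM_PPC_PAGE_SIZES_REAL, ram, 12));  /* 4K  */
        assert(valid_page_size(KVM_PPC_PAGE_SIZES_REAL, ram, 24));  /* 16M */
        assert(!valid_page_size(KVM_PPC_PAGE_SIZES_REAL, ram, 34)); /* 16G */
        assert(valid_page_size(0, ram, 34));   /* flag clear: always valid */
        return 0;
    }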
+static void kvm_fixup_page_sizes(CPUPPCState *env)
+{
+    static struct kvm_ppc_smmu_info smmu_info;
+    static bool has_smmu_info;
+    long rampagesize;
+    int iq, ik, jq, jk;
+
+    /* We only handle page sizes for 64-bit server guests for now */
+    if (!(env->mmu_model & POWERPC_MMU_64)) {
+        return;
+    }
+
+    /* Collect MMU info from kernel if not already */
+    if (!has_smmu_info) {
+        kvm_get_smmu_info(env, &smmu_info);
+        has_smmu_info = true;
+    }
+
+    rampagesize = getrampagesize();
+
+    /* Convert to QEMU form */
+    memset(&env->sps, 0, sizeof(env->sps));
+
+    for (ik = iq = 0; ik < KVM_PPC_PAGE_SIZES_MAX_SZ; ik++) {
+        struct ppc_one_seg_page_size *qsps = &env->sps.sps[iq];
+        struct kvm_ppc_one_seg_page_size *ksps = &smmu_info.sps[ik];
+
+        if (!kvm_valid_page_size(smmu_info.flags, rampagesize,
+                                 ksps->page_shift)) {
+            continue;
+        }
+        qsps->page_shift = ksps->page_shift;
+        qsps->slb_enc = ksps->slb_enc;
+        for (jk = jq = 0; jk < KVM_PPC_PAGE_SIZES_MAX_SZ; jk++) {
+            if (!kvm_valid_page_size(smmu_info.flags, rampagesize,
+                                     ksps->enc[jk].page_shift)) {
+                continue;
+            }
+            qsps->enc[jq].page_shift = ksps->enc[jk].page_shift;
+            qsps->enc[jq].pte_enc = ksps->enc[jk].pte_enc;
+            if (++jq >= PPC_PAGE_SIZES_MAX_SZ) {
+                break;
+            }
+        }
+        if (++iq >= PPC_PAGE_SIZES_MAX_SZ) {
+            break;
+        }
+    }
+    env->slb_nr = smmu_info.slb_size;
+    if (smmu_info.flags & KVM_PPC_1T_SEGMENTS) {
+        env->mmu_model |= POWERPC_MMU_1TSEG;
+    } else {
+        env->mmu_model &= ~POWERPC_MMU_1TSEG;
+    }
+}
+#else /* defined (TARGET_PPC64) */
+
+static inline void kvm_fixup_page_sizes(CPUPPCState *env)
+{
+}
+
+#endif /* !defined (TARGET_PPC64) */
+
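The ik/iq and jk/jq pairs in kvm_fixup_page_sizes are the standard read/write-index compaction idiom: the read index visits every KVM-provided entry, the write index only advances when an entry survives the filter, so env->sps stays densely packed. A generic sketch of the pattern, with hypothetical names:

    /* Copy the entries of 'in' that pass 'keep' into a dense prefix of
     * 'out'. 'k' is the read index, 'q' the write index. */
    static int compact(const int *in, int n, int *out, int max,
                       int (*keep)(int))
    {
        int k, q;

        for (k = q = 0; k < n; k++) {
            if (!keep(in[k])) {
                continue;       /* rejected: only the read index moves */
            }
            out[q] = in[k];
            if (++q >= max) {
                break;          /* output full */
            }
        }
        return q;               /* count of surviving entries */
    }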
 int kvm_arch_init_vcpu(CPUPPCState *cenv)
 {
     int ret;
 
+    /* Gather server mmu info from KVM and update the CPU state */
+    kvm_fixup_page_sizes(cenv);
+
+    /* Synchronize sregs with kvm */
     ret = kvm_arch_sync_sregs(cenv);
     if (ret) {
         return ret;
