Statistics
| Branch: | Revision:

root / target-ppc / kvm.c @ efd7f486

History | View | Annotate | Download (33.3 kB)

1 d76d1650 aurel32
/*
2 d76d1650 aurel32
 * PowerPC implementation of KVM hooks
3 d76d1650 aurel32
 *
4 d76d1650 aurel32
 * Copyright IBM Corp. 2007
5 90dc8812 Scott Wood
 * Copyright (C) 2011 Freescale Semiconductor, Inc.
6 d76d1650 aurel32
 *
7 d76d1650 aurel32
 * Authors:
8 d76d1650 aurel32
 *  Jerone Young <jyoung5@us.ibm.com>
9 d76d1650 aurel32
 *  Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
10 d76d1650 aurel32
 *  Hollis Blanchard <hollisb@us.ibm.com>
11 d76d1650 aurel32
 *
12 d76d1650 aurel32
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
13 d76d1650 aurel32
 * See the COPYING file in the top-level directory.
14 d76d1650 aurel32
 *
15 d76d1650 aurel32
 */
16 d76d1650 aurel32
17 eadaada1 Alexander Graf
#include <dirent.h>
18 d76d1650 aurel32
#include <sys/types.h>
19 d76d1650 aurel32
#include <sys/ioctl.h>
20 d76d1650 aurel32
#include <sys/mman.h>
21 4656e1f0 Benjamin Herrenschmidt
#include <sys/vfs.h>
22 d76d1650 aurel32
23 d76d1650 aurel32
#include <linux/kvm.h>
24 d76d1650 aurel32
25 d76d1650 aurel32
#include "qemu-common.h"
26 d76d1650 aurel32
#include "qemu-timer.h"
27 d76d1650 aurel32
#include "sysemu.h"
28 d76d1650 aurel32
#include "kvm.h"
29 d76d1650 aurel32
#include "kvm_ppc.h"
30 d76d1650 aurel32
#include "cpu.h"
31 12b1143b David Gibson
#include "cpus.h"
32 d76d1650 aurel32
#include "device_tree.h"
33 0f5cb298 David Gibson
#include "hw/sysbus.h"
34 e97c3636 David Gibson
#include "hw/spapr.h"
35 d76d1650 aurel32
36 f61b4bed Alexander Graf
#include "hw/sysbus.h"
37 f61b4bed Alexander Graf
#include "hw/spapr.h"
38 f61b4bed Alexander Graf
#include "hw/spapr_vio.h"
39 f61b4bed Alexander Graf
40 d76d1650 aurel32
//#define DEBUG_KVM
41 d76d1650 aurel32
42 d76d1650 aurel32
#ifdef DEBUG_KVM
43 d76d1650 aurel32
#define dprintf(fmt, ...) \
44 d76d1650 aurel32
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
45 d76d1650 aurel32
#else
46 d76d1650 aurel32
#define dprintf(fmt, ...) \
47 d76d1650 aurel32
    do { } while (0)
48 d76d1650 aurel32
#endif
49 d76d1650 aurel32
50 eadaada1 Alexander Graf
#define PROC_DEVTREE_CPU      "/proc/device-tree/cpus/"
51 eadaada1 Alexander Graf
52 94a8d39a Jan Kiszka
const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
53 94a8d39a Jan Kiszka
    KVM_CAP_LAST_INFO
54 94a8d39a Jan Kiszka
};
55 94a8d39a Jan Kiszka
56 fc87e185 Alexander Graf
static int cap_interrupt_unset = false;
57 fc87e185 Alexander Graf
static int cap_interrupt_level = false;
58 90dc8812 Scott Wood
static int cap_segstate;
59 90dc8812 Scott Wood
static int cap_booke_sregs;
60 e97c3636 David Gibson
static int cap_ppc_smt;
61 354ac20a David Gibson
static int cap_ppc_rma;
62 0f5cb298 David Gibson
static int cap_spapr_tce;
63 f1af19d7 David Gibson
static int cap_hior;
64 fc87e185 Alexander Graf
65 c821c2bd Alexander Graf
/* XXX We have a race condition where we actually have a level triggered
66 c821c2bd Alexander Graf
 *     interrupt, but the infrastructure can't expose that yet, so the guest
67 c821c2bd Alexander Graf
 *     takes but ignores it, goes to sleep and never gets notified that there's
68 c821c2bd Alexander Graf
 *     still an interrupt pending.
69 c6a94ba5 Alexander Graf
 *
70 c821c2bd Alexander Graf
 *     As a quick workaround, let's just wake up again 20 ms after we injected
71 c821c2bd Alexander Graf
 *     an interrupt. That way we can assure that we're always reinjecting
72 c821c2bd Alexander Graf
 *     interrupts in case the guest swallowed them.
73 c6a94ba5 Alexander Graf
 */
74 c6a94ba5 Alexander Graf
static QEMUTimer *idle_timer;
75 c6a94ba5 Alexander Graf
76 d5a68146 Andreas Färber
static void kvm_kick_cpu(void *opaque)
77 c6a94ba5 Alexander Graf
{
78 d5a68146 Andreas Färber
    PowerPCCPU *cpu = opaque;
79 d5a68146 Andreas Färber
80 c08d7424 Andreas Färber
    qemu_cpu_kick(CPU(cpu));
81 c6a94ba5 Alexander Graf
}
82 c6a94ba5 Alexander Graf
83 cad1e282 Jan Kiszka
/*
 * One-time architecture init: cache which PPC-specific KVM capabilities
 * the host kernel offers, so later code can test the cap_* globals
 * instead of issuing ioctls repeatedly.
 *
 * Always returns 0; a missing level-irq capability only earns a warning.
 */
int kvm_arch_init(KVMState *s)
{
    cap_interrupt_level = kvm_check_extension(s, KVM_CAP_PPC_IRQ_LEVEL);
    cap_interrupt_unset = kvm_check_extension(s, KVM_CAP_PPC_UNSET_IRQ);
    cap_segstate = kvm_check_extension(s, KVM_CAP_PPC_SEGSTATE);
    cap_booke_sregs = kvm_check_extension(s, KVM_CAP_PPC_BOOKE_SREGS);
    cap_ppc_smt = kvm_check_extension(s, KVM_CAP_PPC_SMT);
    cap_ppc_rma = kvm_check_extension(s, KVM_CAP_PPC_RMA);
    cap_spapr_tce = kvm_check_extension(s, KVM_CAP_SPAPR_TCE);
    cap_hior = kvm_check_extension(s, KVM_CAP_PPC_HIOR);

    if (!cap_interrupt_level) {
        fprintf(stderr, "KVM: Couldn't find level irq capability. Expect the "
                        "VM to stall at times!\n");
    }

    return 0;
}
101 d76d1650 aurel32
102 1328c2bf Andreas Färber
/*
 * Push the guest PVR to KVM via KVM_SET_SREGS (read-modify-write so
 * the other sregs fields are preserved).  BookE guests are left on the
 * native PVR and this is a no-op for them.
 *
 * Returns 0 on success, -ENOSYS if the kernel lacks the segment-state
 * capability, or the ioctl's negative error code.
 */
static int kvm_arch_sync_sregs(CPUPPCState *cenv)
{
    struct kvm_sregs sregs;
    int err;

    if (cenv->excp_model == POWERPC_EXCP_BOOKE) {
        /* What we're really trying to say is "if we're on BookE, we use
           the native PVR for now". This is the only sane way to check
           it though, so we potentially confuse users that they can run
           BookE guests on BookS. Let's hope nobody dares enough :) */
        return 0;
    }

    if (!cap_segstate) {
        fprintf(stderr, "kvm error: missing PVR setting capability\n");
        return -ENOSYS;
    }

    err = kvm_vcpu_ioctl(cenv, KVM_GET_SREGS, &sregs);
    if (err) {
        return err;
    }

    sregs.pvr = cenv->spr[SPR_PVR];
    return kvm_vcpu_ioctl(cenv, KVM_SET_SREGS, &sregs);
}
128 5666ca4a Scott Wood
129 93dd5e85 Scott Wood
/* Set up a shared TLB array with KVM */
130 1328c2bf Andreas Färber
/* Set up a shared TLB array with KVM.
 *
 * Describes the geometry of every BookE 2.06 TLB to the kernel and
 * hands it QEMU's TLB backing array, so guest TLB state lives in
 * memory shared between QEMU and KVM.  Quietly does nothing when KVM
 * is disabled or KVM_CAP_SW_TLB is unsupported.
 *
 * Returns 0 on success (or when skipped), a negative errno otherwise.
 */
static int kvm_booke206_tlb_init(CPUPPCState *env)
{
    struct kvm_book3e_206_tlb_params params = {};
    struct kvm_config_tlb cfg = {};
    struct kvm_enable_cap encap = {};
    unsigned int total = 0;
    int err, tlbn;

    if (!kvm_enabled() ||
        !kvm_check_extension(env->kvm_state, KVM_CAP_SW_TLB)) {
        return 0;
    }

    assert(ARRAY_SIZE(params.tlb_sizes) == BOOKE206_MAX_TLBN);

    /* Record each TLB's size/associativity and count total entries. */
    for (tlbn = 0; tlbn < BOOKE206_MAX_TLBN; tlbn++) {
        params.tlb_sizes[tlbn] = booke206_tlb_size(env, tlbn);
        params.tlb_ways[tlbn] = booke206_tlb_ways(env, tlbn);
        total += params.tlb_sizes[tlbn];
    }

    assert(total == env->nb_tlb);
    /* The shared array layout must match what the kernel expects. */
    assert(sizeof(struct kvm_book3e_206_tlb_entry) == sizeof(ppcmas_tlb_t));

    env->tlb_dirty = true;

    cfg.array = (uintptr_t)env->tlb.tlbm;
    cfg.array_len = sizeof(ppcmas_tlb_t) * total;
    cfg.params = (uintptr_t)&params;
    cfg.mmu_type = KVM_MMU_FSL_BOOKE_NOHV;

    encap.cap = KVM_CAP_SW_TLB;
    encap.args[0] = (uintptr_t)&cfg;

    err = kvm_vcpu_ioctl(env, KVM_ENABLE_CAP, &encap);
    if (err < 0) {
        fprintf(stderr, "%s: couldn't enable KVM_CAP_SW_TLB: %s\n",
                __func__, strerror(-err));
        return err;
    }

    env->kvm_sw_tlb = true;
    return 0;
}
174 93dd5e85 Scott Wood
175 4656e1f0 Benjamin Herrenschmidt
176 4656e1f0 Benjamin Herrenschmidt
#if defined(TARGET_PPC64)
177 4656e1f0 Benjamin Herrenschmidt
/*
 * Populate *info with a conservative guess of the host MMU's supported
 * segment/page sizes, for kernels that predate KVM_PPC_GET_SMMU_INFO.
 * Used only as a fallback by kvm_get_smmu_info().
 */
static void kvm_get_fallback_smmu_info(CPUPPCState *env,
                                       struct kvm_ppc_smmu_info *info)
{
    memset(info, 0, sizeof(*info));

    /* We don't have the new KVM_PPC_GET_SMMU_INFO ioctl, so
     * need to "guess" what the supported page sizes are.
     *
     * For that to work we make a few assumptions:
     *
     * - If KVM_CAP_PPC_GET_PVINFO is supported we are running "PR"
     *   KVM which only supports 4K and 16M pages, but supports them
     *   regardless of the backing store characteristics. We also don't
     *   support 1T segments.
     *
     *   This is safe as if HV KVM ever supports that capability or PR
     *   KVM grows support for more page/segment sizes, those versions
     *   will have implemented KVM_CAP_PPC_GET_SMMU_INFO and thus we
     *   will not hit this fallback
     *
     * - Else we are running HV KVM. This means we only support page
     *   sizes that fit in the backing store. Additionally we only
     *   advertise 64K pages if the processor is ARCH 2.06 and we assume
     *   P7 encodings for the SLB and hash table. Here too, we assume
     *   support for any newer processor will mean a kernel that
     *   implements KVM_CAP_PPC_GET_SMMU_INFO and thus doesn't hit
     *   this fallback.
     */
    if (kvm_check_extension(env->kvm_state, KVM_CAP_PPC_GET_PVINFO)) {
        /* PR KVM path: no flags (page sizes independent of backing store) */
        info->flags = 0;
        info->slb_size = 64;

        /* Standard 4k base page size segment */
        info->sps[0].page_shift = 12;
        info->sps[0].slb_enc = 0;
        info->sps[0].enc[0].page_shift = 12;
        info->sps[0].enc[0].pte_enc = 0;

        /* Standard 16M large page size segment */
        info->sps[1].page_shift = 24;
        info->sps[1].slb_enc = SLB_VSID_L;
        info->sps[1].enc[0].page_shift = 24;
        info->sps[1].enc[0].pte_enc = 0;
    } else {
        int i = 0;

        /* HV KVM has backing store size restrictions */
        info->flags = KVM_PPC_PAGE_SIZES_REAL;

        if (env->mmu_model & POWERPC_MMU_1TSEG) {
            info->flags |= KVM_PPC_1T_SEGMENTS;
        }

        /* ARCH 2.06 (P7-era) has a 32-entry SLB; older models get 64. */
        if (env->mmu_model == POWERPC_MMU_2_06) {
            info->slb_size = 32;
        } else {
            info->slb_size = 64;
        }

        /* Standard 4k base page size segment */
        info->sps[i].page_shift = 12;
        info->sps[i].slb_enc = 0;
        info->sps[i].enc[0].page_shift = 12;
        info->sps[i].enc[0].pte_enc = 0;
        i++;

        /* 64K on MMU 2.06 */
        if (env->mmu_model == POWERPC_MMU_2_06) {
            info->sps[i].page_shift = 16;
            info->sps[i].slb_enc = 0x110;
            info->sps[i].enc[0].page_shift = 16;
            info->sps[i].enc[0].pte_enc = 1;
            i++;
        }

        /* Standard 16M large page size segment */
        info->sps[i].page_shift = 24;
        info->sps[i].slb_enc = SLB_VSID_L;
        info->sps[i].enc[0].page_shift = 24;
        info->sps[i].enc[0].pte_enc = 0;
    }
}
260 4656e1f0 Benjamin Herrenschmidt
261 4656e1f0 Benjamin Herrenschmidt
/*
 * Fill *info with the host MMU description: ask the kernel via
 * KVM_PPC_GET_SMMU_INFO when available, otherwise fall back to
 * kvm_get_fallback_smmu_info()'s conservative guess.
 */
static void kvm_get_smmu_info(CPUPPCState *env, struct kvm_ppc_smmu_info *info)
{
    if (kvm_check_extension(env->kvm_state, KVM_CAP_PPC_GET_SMMU_INFO) &&
        kvm_vm_ioctl(env->kvm_state, KVM_PPC_GET_SMMU_INFO, info) == 0) {
        return;
    }

    kvm_get_fallback_smmu_info(env, info);
}
274 4656e1f0 Benjamin Herrenschmidt
275 4656e1f0 Benjamin Herrenschmidt
static long getrampagesize(void)
276 4656e1f0 Benjamin Herrenschmidt
{
277 4656e1f0 Benjamin Herrenschmidt
    struct statfs fs;
278 4656e1f0 Benjamin Herrenschmidt
    int ret;
279 4656e1f0 Benjamin Herrenschmidt
280 4656e1f0 Benjamin Herrenschmidt
    if (!mem_path) {
281 4656e1f0 Benjamin Herrenschmidt
        /* guest RAM is backed by normal anonymous pages */
282 4656e1f0 Benjamin Herrenschmidt
        return getpagesize();
283 4656e1f0 Benjamin Herrenschmidt
    }
284 4656e1f0 Benjamin Herrenschmidt
285 4656e1f0 Benjamin Herrenschmidt
    do {
286 4656e1f0 Benjamin Herrenschmidt
        ret = statfs(mem_path, &fs);
287 4656e1f0 Benjamin Herrenschmidt
    } while (ret != 0 && errno == EINTR);
288 4656e1f0 Benjamin Herrenschmidt
289 4656e1f0 Benjamin Herrenschmidt
    if (ret != 0) {
290 4656e1f0 Benjamin Herrenschmidt
        fprintf(stderr, "Couldn't statfs() memory path: %s\n",
291 4656e1f0 Benjamin Herrenschmidt
                strerror(errno));
292 4656e1f0 Benjamin Herrenschmidt
        exit(1);
293 4656e1f0 Benjamin Herrenschmidt
    }
294 4656e1f0 Benjamin Herrenschmidt
295 4656e1f0 Benjamin Herrenschmidt
#define HUGETLBFS_MAGIC       0x958458f6
296 4656e1f0 Benjamin Herrenschmidt
297 4656e1f0 Benjamin Herrenschmidt
    if (fs.f_type != HUGETLBFS_MAGIC) {
298 4656e1f0 Benjamin Herrenschmidt
        /* Explicit mempath, but it's ordinary pages */
299 4656e1f0 Benjamin Herrenschmidt
        return getpagesize();
300 4656e1f0 Benjamin Herrenschmidt
    }
301 4656e1f0 Benjamin Herrenschmidt
302 4656e1f0 Benjamin Herrenschmidt
    /* It's hugepage, return the huge page size */
303 4656e1f0 Benjamin Herrenschmidt
    return fs.f_bsize;
304 4656e1f0 Benjamin Herrenschmidt
}
305 4656e1f0 Benjamin Herrenschmidt
306 4656e1f0 Benjamin Herrenschmidt
static bool kvm_valid_page_size(uint32_t flags, long rampgsize, uint32_t shift)
307 4656e1f0 Benjamin Herrenschmidt
{
308 4656e1f0 Benjamin Herrenschmidt
    if (!(flags & KVM_PPC_PAGE_SIZES_REAL)) {
309 4656e1f0 Benjamin Herrenschmidt
        return true;
310 4656e1f0 Benjamin Herrenschmidt
    }
311 4656e1f0 Benjamin Herrenschmidt
312 4656e1f0 Benjamin Herrenschmidt
    return (1ul << shift) <= rampgsize;
313 4656e1f0 Benjamin Herrenschmidt
}
314 4656e1f0 Benjamin Herrenschmidt
315 4656e1f0 Benjamin Herrenschmidt
/*
 * Trim the CPU's advertised segment/page sizes down to what the host
 * KVM and the RAM backing pages actually permit, converting the kernel's
 * kvm_ppc_smmu_info into env->sps.  Also updates env->slb_nr and the
 * 1T-segment bit of env->mmu_model from the kernel-reported MMU info.
 */
static void kvm_fixup_page_sizes(CPUPPCState *env)
{
    /* SMMU info is per-host, so fetch it only once and cache it. */
    static struct kvm_ppc_smmu_info smmu_info;
    static bool has_smmu_info;
    long rampagesize;
    int iq, ik, jq, jk;

    /* We only handle page sizes for 64-bit server guests for now */
    if (!(env->mmu_model & POWERPC_MMU_64)) {
        return;
    }

    /* Collect MMU info from kernel if not already */
    if (!has_smmu_info) {
        kvm_get_smmu_info(env, &smmu_info);
        has_smmu_info = true;
    }

    rampagesize = getrampagesize();

    /* Convert to QEMU form */
    memset(&env->sps, 0, sizeof(env->sps));

    /* Compact the kernel's sps[] into env->sps.sps[], dropping entries
     * the RAM backing pages can't support: ik/jk walk the kernel arrays,
     * iq/jq index the (possibly shorter) QEMU-side arrays. */
    for (ik = iq = 0; ik < KVM_PPC_PAGE_SIZES_MAX_SZ; ik++) {
        struct ppc_one_seg_page_size *qsps = &env->sps.sps[iq];
        struct kvm_ppc_one_seg_page_size *ksps = &smmu_info.sps[ik];

        if (!kvm_valid_page_size(smmu_info.flags, rampagesize,
                                 ksps->page_shift)) {
            continue;
        }
        qsps->page_shift = ksps->page_shift;
        qsps->slb_enc = ksps->slb_enc;
        /* Same filtering for each segment's page-size encodings. */
        for (jk = jq = 0; jk < KVM_PPC_PAGE_SIZES_MAX_SZ; jk++) {
            if (!kvm_valid_page_size(smmu_info.flags, rampagesize,
                                     ksps->enc[jk].page_shift)) {
                continue;
            }
            qsps->enc[jq].page_shift = ksps->enc[jk].page_shift;
            qsps->enc[jq].pte_enc = ksps->enc[jk].pte_enc;
            if (++jq >= PPC_PAGE_SIZES_MAX_SZ) {
                break;
            }
        }
        if (++iq >= PPC_PAGE_SIZES_MAX_SZ) {
            break;
        }
    }
    env->slb_nr = smmu_info.slb_size;
    /* Mirror the kernel's 1T-segment support into the MMU model flag. */
    if (smmu_info.flags & KVM_PPC_1T_SEGMENTS) {
        env->mmu_model |= POWERPC_MMU_1TSEG;
    } else {
        env->mmu_model &= ~POWERPC_MMU_1TSEG;
    }
}
370 4656e1f0 Benjamin Herrenschmidt
#else /* defined (TARGET_PPC64) */
371 4656e1f0 Benjamin Herrenschmidt
372 4656e1f0 Benjamin Herrenschmidt
static inline void kvm_fixup_page_sizes(CPUPPCState *env)
{
    /* Page-size fixup only applies to 64-bit server MMUs (see the
     * TARGET_PPC64 implementation above); intentionally a no-op here. */
}
375 4656e1f0 Benjamin Herrenschmidt
376 4656e1f0 Benjamin Herrenschmidt
#endif /* !defined (TARGET_PPC64) */
377 4656e1f0 Benjamin Herrenschmidt
378 1328c2bf Andreas Färber
/*
 * Per-vCPU KVM initialization: adjust the advertised page sizes,
 * push sregs (PVR) to the kernel, arm the wakeup timer used to work
 * around the level-interrupt race, and set up the shared TLB on MMU
 * models that support it.
 *
 * Returns 0 on success or a negative error code.
 */
int kvm_arch_init_vcpu(CPUPPCState *cenv)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(cenv);
    int err;

    /* Gather server mmu info from KVM and update the CPU state */
    kvm_fixup_page_sizes(cenv);

    /* Synchronize sregs with kvm */
    err = kvm_arch_sync_sregs(cenv);
    if (err) {
        return err;
    }

    idle_timer = qemu_new_timer_ns(vm_clock, kvm_kick_cpu, cpu);

    /* Some targets support access to KVM's guest TLB. */
    if (cenv->mmu_model == POWERPC_MMU_BOOKE206) {
        err = kvm_booke206_tlb_init(cenv);
    }

    return err;
}
405 d76d1650 aurel32
406 1328c2bf Andreas Färber
void kvm_arch_reset_vcpu(CPUPPCState *env)
{
    /* Intentionally empty: no PPC-specific work is needed at vcpu reset. */
}
409 caa5af0f Jan Kiszka
410 1328c2bf Andreas Färber
/*
 * Tell KVM to reload the shared TLB array by marking every entry dirty
 * (KVM_DIRTY_TLB with an all-ones bitmap).  No-op unless this vcpu is
 * using the shared-TLB mechanism set up by kvm_booke206_tlb_init().
 * Failure is reported on stderr but not propagated.
 */
static void kvm_sw_tlb_put(CPUPPCState *env)
{
    struct kvm_dirty_tlb dirty_tlb;
    unsigned char *dirty_bitmap;
    size_t bitmap_len;
    int err;

    if (!env->kvm_sw_tlb) {
        return;
    }

    /* One bit per TLB entry, rounded up to whole bytes; set them all. */
    bitmap_len = (env->nb_tlb + 7) / 8;
    dirty_bitmap = g_malloc(bitmap_len);
    memset(dirty_bitmap, 0xFF, bitmap_len);

    dirty_tlb.bitmap = (uintptr_t)dirty_bitmap;
    dirty_tlb.num_dirty = env->nb_tlb;

    err = kvm_vcpu_ioctl(env, KVM_DIRTY_TLB, &dirty_tlb);
    if (err) {
        fprintf(stderr, "%s: KVM_DIRTY_TLB: %s\n",
                __func__, strerror(-err));
    }

    g_free(dirty_bitmap);
}
434 93dd5e85 Scott Wood
435 1328c2bf Andreas Färber
/*
 * Write QEMU's CPU state back into KVM: general registers always;
 * the shared TLB when dirty; sregs (PVR, SDR1, SLB, SRs, BATs) and
 * HIOR only on reset-level syncs and when the kernel supports them.
 *
 * Returns 0 on success or the failing ioctl's negative error code.
 */
int kvm_arch_put_registers(CPUPPCState *env, int level)
{
    struct kvm_regs regs;
    int ret;
    int i;

    ret = kvm_vcpu_ioctl(env, KVM_GET_REGS, &regs);
    if (ret < 0) {
        return ret;
    }

    regs.ctr = env->ctr;
    regs.lr  = env->lr;
    regs.xer = env->xer;
    regs.msr = env->msr;
    regs.pc = env->nip;

    regs.srr0 = env->spr[SPR_SRR0];
    regs.srr1 = env->spr[SPR_SRR1];

    regs.sprg0 = env->spr[SPR_SPRG0];
    regs.sprg1 = env->spr[SPR_SPRG1];
    regs.sprg2 = env->spr[SPR_SPRG2];
    regs.sprg3 = env->spr[SPR_SPRG3];
    regs.sprg4 = env->spr[SPR_SPRG4];
    regs.sprg5 = env->spr[SPR_SPRG5];
    regs.sprg6 = env->spr[SPR_SPRG6];
    regs.sprg7 = env->spr[SPR_SPRG7];

    regs.pid = env->spr[SPR_BOOKE_PID];

    for (i = 0; i < 32; i++) {
        regs.gpr[i] = env->gpr[i];
    }

    ret = kvm_vcpu_ioctl(env, KVM_SET_REGS, &regs);
    if (ret < 0) {
        return ret;
    }

    if (env->tlb_dirty) {
        kvm_sw_tlb_put(env);
        env->tlb_dirty = false;
    }

    if (cap_segstate && (level >= KVM_PUT_RESET_STATE)) {
        struct kvm_sregs sregs;

        /* Zero the whole structure first: only a subset of the fields
         * is populated below, and the kernel consumes the structure in
         * full, so anything left unset would be uninitialized stack
         * data handed to the KVM_SET_SREGS ioctl. */
        memset(&sregs, 0, sizeof(sregs));

        sregs.pvr = env->spr[SPR_PVR];

        sregs.u.s.sdr1 = env->spr[SPR_SDR1];

        /* Sync SLB */
#ifdef TARGET_PPC64
        for (i = 0; i < 64; i++) {
            sregs.u.s.ppc64.slb[i].slbe = env->slb[i].esid;
            sregs.u.s.ppc64.slb[i].slbv = env->slb[i].vsid;
        }
#endif

        /* Sync SRs */
        for (i = 0; i < 16; i++) {
            sregs.u.s.ppc32.sr[i] = env->sr[i];
        }

        /* Sync BATs */
        for (i = 0; i < 8; i++) {
            /* Beware. We have to swap upper and lower bits here */
            sregs.u.s.ppc32.dbat[i] = ((uint64_t)env->DBAT[0][i] << 32)
                | env->DBAT[1][i];
            sregs.u.s.ppc32.ibat[i] = ((uint64_t)env->IBAT[0][i] << 32)
                | env->IBAT[1][i];
        }

        ret = kvm_vcpu_ioctl(env, KVM_SET_SREGS, &sregs);
        if (ret) {
            return ret;
        }
    }

    if (cap_hior && (level >= KVM_PUT_RESET_STATE)) {
        uint64_t hior = env->spr[SPR_HIOR];
        struct kvm_one_reg reg = {
            .id = KVM_REG_PPC_HIOR,
            .addr = (uintptr_t) &hior,
        };

        ret = kvm_vcpu_ioctl(env, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    return ret;
}
527 d76d1650 aurel32
528 1328c2bf Andreas Färber
/* Pull the full register state of a vcpu out of KVM into QEMU's CPU
 * state structure: GPRs/CR/special regs via KVM_GET_REGS, then the
 * BookE and segment-state register families via KVM_GET_SREGS when
 * the corresponding capabilities were detected at init time.
 * Returns 0 on success, or the negative errno from the failing ioctl. */
int kvm_arch_get_registers(CPUPPCState *env)
{
    struct kvm_regs regs;
    struct kvm_sregs sregs;
    uint32_t cr;
    int i, ret;

    ret = kvm_vcpu_ioctl(env, KVM_GET_REGS, &regs);
    if (ret < 0) {
        return ret;
    }

    /* Unpack the packed 32-bit CR into the eight 4-bit CR fields,
     * starting from the least significant nibble (CR7). */
    cr = regs.cr;
    for (i = 7; i >= 0; i--) {
        env->crf[i] = cr & 15;
        cr >>= 4;
    }

    env->ctr = regs.ctr;
    env->lr = regs.lr;
    env->xer = regs.xer;
    env->msr = regs.msr;
    env->nip = regs.pc;

    env->spr[SPR_SRR0] = regs.srr0;
    env->spr[SPR_SRR1] = regs.srr1;

    env->spr[SPR_SPRG0] = regs.sprg0;
    env->spr[SPR_SPRG1] = regs.sprg1;
    env->spr[SPR_SPRG2] = regs.sprg2;
    env->spr[SPR_SPRG3] = regs.sprg3;
    env->spr[SPR_SPRG4] = regs.sprg4;
    env->spr[SPR_SPRG5] = regs.sprg5;
    env->spr[SPR_SPRG6] = regs.sprg6;
    env->spr[SPR_SPRG7] = regs.sprg7;

    env->spr[SPR_BOOKE_PID] = regs.pid;

    for (i = 0; i < 32; i++) {
        env->gpr[i] = regs.gpr[i];
    }

    if (cap_booke_sregs) {
        ret = kvm_vcpu_ioctl(env, KVM_GET_SREGS, &sregs);
        if (ret < 0) {
            return ret;
        }

        /* Each feature bit advertises which part of the sregs union
         * the kernel actually filled in; only consume what is valid. */
        if (sregs.u.e.features & KVM_SREGS_E_BASE) {
            env->spr[SPR_BOOKE_CSRR0] = sregs.u.e.csrr0;
            env->spr[SPR_BOOKE_CSRR1] = sregs.u.e.csrr1;
            env->spr[SPR_BOOKE_ESR] = sregs.u.e.esr;
            env->spr[SPR_BOOKE_DEAR] = sregs.u.e.dear;
            env->spr[SPR_BOOKE_MCSR] = sregs.u.e.mcsr;
            env->spr[SPR_BOOKE_TSR] = sregs.u.e.tsr;
            env->spr[SPR_BOOKE_TCR] = sregs.u.e.tcr;
            env->spr[SPR_DECR] = sregs.u.e.dec;
            /* 64-bit timebase is split across the TBL/TBU SPR pair */
            env->spr[SPR_TBL] = sregs.u.e.tb & 0xffffffff;
            env->spr[SPR_TBU] = sregs.u.e.tb >> 32;
            env->spr[SPR_VRSAVE] = sregs.u.e.vrsave;
        }

        if (sregs.u.e.features & KVM_SREGS_E_ARCH206) {
            env->spr[SPR_BOOKE_PIR] = sregs.u.e.pir;
            env->spr[SPR_BOOKE_MCSRR0] = sregs.u.e.mcsrr0;
            env->spr[SPR_BOOKE_MCSRR1] = sregs.u.e.mcsrr1;
            env->spr[SPR_BOOKE_DECAR] = sregs.u.e.decar;
            env->spr[SPR_BOOKE_IVPR] = sregs.u.e.ivpr;
        }

        if (sregs.u.e.features & KVM_SREGS_E_64) {
            env->spr[SPR_BOOKE_EPCR] = sregs.u.e.epcr;
        }

        if (sregs.u.e.features & KVM_SREGS_E_SPRG8) {
            env->spr[SPR_BOOKE_SPRG8] = sregs.u.e.sprg8;
        }

        if (sregs.u.e.features & KVM_SREGS_E_IVOR) {
            env->spr[SPR_BOOKE_IVOR0] = sregs.u.e.ivor_low[0];
            env->spr[SPR_BOOKE_IVOR1] = sregs.u.e.ivor_low[1];
            env->spr[SPR_BOOKE_IVOR2] = sregs.u.e.ivor_low[2];
            env->spr[SPR_BOOKE_IVOR3] = sregs.u.e.ivor_low[3];
            env->spr[SPR_BOOKE_IVOR4] = sregs.u.e.ivor_low[4];
            env->spr[SPR_BOOKE_IVOR5] = sregs.u.e.ivor_low[5];
            env->spr[SPR_BOOKE_IVOR6] = sregs.u.e.ivor_low[6];
            env->spr[SPR_BOOKE_IVOR7] = sregs.u.e.ivor_low[7];
            env->spr[SPR_BOOKE_IVOR8] = sregs.u.e.ivor_low[8];
            env->spr[SPR_BOOKE_IVOR9] = sregs.u.e.ivor_low[9];
            env->spr[SPR_BOOKE_IVOR10] = sregs.u.e.ivor_low[10];
            env->spr[SPR_BOOKE_IVOR11] = sregs.u.e.ivor_low[11];
            env->spr[SPR_BOOKE_IVOR12] = sregs.u.e.ivor_low[12];
            env->spr[SPR_BOOKE_IVOR13] = sregs.u.e.ivor_low[13];
            env->spr[SPR_BOOKE_IVOR14] = sregs.u.e.ivor_low[14];
            env->spr[SPR_BOOKE_IVOR15] = sregs.u.e.ivor_low[15];

            if (sregs.u.e.features & KVM_SREGS_E_SPE) {
                env->spr[SPR_BOOKE_IVOR32] = sregs.u.e.ivor_high[0];
                env->spr[SPR_BOOKE_IVOR33] = sregs.u.e.ivor_high[1];
                env->spr[SPR_BOOKE_IVOR34] = sregs.u.e.ivor_high[2];
            }

            if (sregs.u.e.features & KVM_SREGS_E_PM) {
                env->spr[SPR_BOOKE_IVOR35] = sregs.u.e.ivor_high[3];
            }

            if (sregs.u.e.features & KVM_SREGS_E_PC) {
                env->spr[SPR_BOOKE_IVOR36] = sregs.u.e.ivor_high[4];
                env->spr[SPR_BOOKE_IVOR37] = sregs.u.e.ivor_high[5];
            }
        }

        if (sregs.u.e.features & KVM_SREGS_E_ARCH206_MMU) {
            env->spr[SPR_BOOKE_MAS0] = sregs.u.e.mas0;
            env->spr[SPR_BOOKE_MAS1] = sregs.u.e.mas1;
            env->spr[SPR_BOOKE_MAS2] = sregs.u.e.mas2;
            /* MAS7 and MAS3 travel as one 64-bit quantity */
            env->spr[SPR_BOOKE_MAS3] = sregs.u.e.mas7_3 & 0xffffffff;
            env->spr[SPR_BOOKE_MAS4] = sregs.u.e.mas4;
            env->spr[SPR_BOOKE_MAS6] = sregs.u.e.mas6;
            env->spr[SPR_BOOKE_MAS7] = sregs.u.e.mas7_3 >> 32;
            env->spr[SPR_MMUCFG] = sregs.u.e.mmucfg;
            env->spr[SPR_BOOKE_TLB0CFG] = sregs.u.e.tlbcfg[0];
            env->spr[SPR_BOOKE_TLB1CFG] = sregs.u.e.tlbcfg[1];
        }

        if (sregs.u.e.features & KVM_SREGS_EXP) {
            env->spr[SPR_BOOKE_EPR] = sregs.u.e.epr;
        }

        if (sregs.u.e.features & KVM_SREGS_E_PD) {
            env->spr[SPR_BOOKE_EPLC] = sregs.u.e.eplc;
            env->spr[SPR_BOOKE_EPSC] = sregs.u.e.epsc;
        }

        /* Freescale implementation-specific registers */
        if (sregs.u.e.impl_id == KVM_SREGS_E_IMPL_FSL) {
            env->spr[SPR_E500_SVR] = sregs.u.e.impl.fsl.svr;
            env->spr[SPR_Exxx_MCAR] = sregs.u.e.impl.fsl.mcar;
            env->spr[SPR_HID0] = sregs.u.e.impl.fsl.hid0;

            if (sregs.u.e.impl.fsl.features & KVM_SREGS_E_FSL_PIDn) {
                env->spr[SPR_BOOKE_PID1] = sregs.u.e.impl.fsl.pid1;
                env->spr[SPR_BOOKE_PID2] = sregs.u.e.impl.fsl.pid2;
            }
        }
    }

    if (cap_segstate) {
        ret = kvm_vcpu_ioctl(env, KVM_GET_SREGS, &sregs);
        if (ret < 0) {
            return ret;
        }

        ppc_store_sdr1(env, sregs.u.s.sdr1);

        /* Sync SLB */
#ifdef TARGET_PPC64
        for (i = 0; i < 64; i++) {
            ppc_store_slb(env, sregs.u.s.ppc64.slb[i].slbe,
                               sregs.u.s.ppc64.slb[i].slbv);
        }
#endif

        /* Sync SRs */
        for (i = 0; i < 16; i++) {
            env->sr[i] = sregs.u.s.ppc32.sr[i];
        }

        /* Sync BATs: KVM packs upper/lower halves into one 64-bit word */
        for (i = 0; i < 8; i++) {
            env->DBAT[0][i] = sregs.u.s.ppc32.dbat[i] & 0xffffffff;
            env->DBAT[1][i] = sregs.u.s.ppc32.dbat[i] >> 32;
            env->IBAT[0][i] = sregs.u.s.ppc32.ibat[i] & 0xffffffff;
            env->IBAT[1][i] = sregs.u.s.ppc32.ibat[i] >> 32;
        }
    }

    return 0;
}
704 d76d1650 aurel32
705 1328c2bf Andreas Färber
/* Raise or lower the external interrupt line of a vcpu inside KVM.
 * Only PPC_INTERRUPT_EXT is forwarded; everything else is handled in
 * QEMU.  Requires both the SET_LEVEL and UNSET interrupt capabilities.
 * Always returns 0. */
int kvmppc_set_interrupt(CPUPPCState *env, int irq, int level)
{
    unsigned kvm_irq = level ? KVM_INTERRUPT_SET_LEVEL : KVM_INTERRUPT_UNSET;

    /* Only the external interrupt pin is routed through KVM */
    if (irq != PPC_INTERRUPT_EXT) {
        return 0;
    }

    /* Level-triggered injection needs kernel support on both edges */
    if (!kvm_enabled() || !cap_interrupt_unset || !cap_interrupt_level) {
        return 0;
    }

    kvm_vcpu_ioctl(env, KVM_INTERRUPT, &kvm_irq);

    return 0;
}
721 fc87e185 Alexander Graf
722 16415335 Alexander Graf
#if defined(TARGET_PPCEMB)
723 16415335 Alexander Graf
#define PPC_INPUT_INT PPC40x_INPUT_INT
724 16415335 Alexander Graf
#elif defined(TARGET_PPC64)
725 16415335 Alexander Graf
#define PPC_INPUT_INT PPC970_INPUT_INT
726 16415335 Alexander Graf
#else
727 16415335 Alexander Graf
#define PPC_INPUT_INT PPC6xx_INPUT_INT
728 16415335 Alexander Graf
#endif
729 16415335 Alexander Graf
730 1328c2bf Andreas Färber
/* Called right before entering the guest.  On kernels without
 * level-triggered interrupt support we emulate the external interrupt
 * pin by injecting edge interrupts here whenever the pin is asserted,
 * and re-arming a short timer so a still-asserted level line gets
 * re-injected soon after. */
void kvm_arch_pre_run(CPUPPCState *env, struct kvm_run *run)
{
    /* PowerPC QEMU tracks the various core input pins (interrupt, critical
     * interrupt, reset, etc) in PPC-specific env->irq_input_state. */
    if (!cap_interrupt_level &&
        run->ready_for_interrupt_injection &&
        (env->interrupt_request & CPU_INTERRUPT_HARD) &&
        (env->irq_input_state & (1 << PPC_INPUT_INT))) {
        /* For now KVM disregards the 'irq' argument. However, in the
         * future KVM could cache it in-kernel to avoid a heavyweight exit
         * when reading the UIC.
         */
        unsigned irq = KVM_INTERRUPT_SET;
        int r;

        dprintf("injected interrupt %d\n", irq);
        r = kvm_vcpu_ioctl(env, KVM_INTERRUPT, &irq);
        if (r < 0) {
            printf("cpu %d fail inject %x\n", env->cpu_index, irq);
        }

        /* Always wake up soon in case the interrupt was level based */
        qemu_mod_timer(idle_timer, qemu_get_clock_ns(vm_clock) +
                       (get_ticks_per_sec() / 50));
    }

    /* We don't know if there are more interrupts pending after this. However,
     * the guest will return to userspace in the course of handling this one
     * anyways, so we will get a chance to deliver the rest. */
}
762 d76d1650 aurel32
763 1328c2bf Andreas Färber
/* Called after returning from the guest; nothing to do on PPC. */
void kvm_arch_post_run(CPUPPCState *env, struct kvm_run *run)
{
}
766 d76d1650 aurel32
767 1328c2bf Andreas Färber
/* Report whether the vcpu is halted so the generic KVM loop can skip
 * re-entering the guest. */
int kvm_arch_process_async_events(CPUPPCState *env)
{
    return env->halted;
}
771 0af691d7 Marcelo Tosatti
772 1328c2bf Andreas Färber
/* Handle a KVM_EXIT_HLT: if no hard interrupt is pending and external
 * interrupts are enabled (MSR[EE]), put the vcpu to sleep by flagging
 * the halted state and EXCP_HLT.  Always returns 0 (resume the loop). */
static int kvmppc_handle_halt(CPUPPCState *env)
{
    int interrupt_pending = env->interrupt_request & CPU_INTERRUPT_HARD;

    if (!interrupt_pending && msr_ee) {
        env->halted = 1;
        env->exception_index = EXCP_HLT;
    }

    return 0;
}
781 d76d1650 aurel32
782 d76d1650 aurel32
/* map dcr access to existing qemu dcr emulation */
783 1328c2bf Andreas Färber
/* map dcr access to existing qemu dcr emulation */

/* Forward a guest DCR read to QEMU's DCR emulation; unhandled DCR
 * numbers are only reported, never fatal.  Always returns 0. */
static int kvmppc_handle_dcr_read(CPUPPCState *env, uint32_t dcrn, uint32_t *data)
{
    if (ppc_dcr_read(env->dcr_env, dcrn, data) < 0) {
        fprintf(stderr, "Read to unhandled DCR (0x%x)\n", dcrn);
    }

    return 0;
}
790 d76d1650 aurel32
791 1328c2bf Andreas Färber
/* Forward a guest DCR write to QEMU's DCR emulation; unhandled DCR
 * numbers are only reported, never fatal.  Always returns 0. */
static int kvmppc_handle_dcr_write(CPUPPCState *env, uint32_t dcrn, uint32_t data)
{
    if (ppc_dcr_write(env->dcr_env, dcrn, data) < 0) {
        fprintf(stderr, "Write to unhandled DCR (0x%x)\n", dcrn);
    }

    return 0;
}
798 d76d1650 aurel32
799 1328c2bf Andreas Färber
/* Dispatch a KVM exit back into QEMU: DCR accesses, halt, and (on
 * pseries builds) PAPR hypercalls.  Unknown exit reasons return -1 so
 * the generic loop aborts. */
int kvm_arch_handle_exit(CPUPPCState *env, struct kvm_run *run)
{
    int ret;

    switch (run->exit_reason) {
    case KVM_EXIT_DCR:
        if (run->dcr.is_write) {
            dprintf("handle dcr write\n");
            ret = kvmppc_handle_dcr_write(env, run->dcr.dcrn, run->dcr.data);
        } else {
            dprintf("handle dcr read\n");
            ret = kvmppc_handle_dcr_read(env, run->dcr.dcrn, &run->dcr.data);
        }
        break;

    case KVM_EXIT_HLT:
        dprintf("handle halt\n");
        ret = kvmppc_handle_halt(env);
        break;

#ifdef CONFIG_PSERIES
    case KVM_EXIT_PAPR_HCALL:
        dprintf("handle PAPR hypercall\n");
        run->papr_hcall.ret = spapr_hypercall(ppc_env_get_cpu(env),
                                              run->papr_hcall.nr,
                                              run->papr_hcall.args);
        ret = 0;
        break;
#endif

    default:
        fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
        ret = -1;
        break;
    }

    return ret;
}
834 d76d1650 aurel32
835 dc333cd6 Alexander Graf
/* Scan /proc/cpuinfo for a line starting with 'field' and copy the
 * whole matching line (truncated to 'len') into 'value'.
 * Returns 0 on a match, -1 if the file can't be opened or the field
 * is not found. */
static int read_cpuinfo(const char *field, char *value, int len)
{
    char line[512];
    FILE *f;
    int field_len = strlen(field);
    int found = -1;

    f = fopen("/proc/cpuinfo", "r");
    if (!f) {
        return -1;
    }

    do {
        if (!fgets(line, sizeof(line), f)) {
            break;          /* EOF or read error */
        }
        if (!strncmp(line, field, field_len)) {
            pstrcpy(value, len, line);
            found = 0;
            break;
        }
    } while (*line);        /* stop if an empty record is ever read */

    fclose(f);

    return found;
}
862 dc333cd6 Alexander Graf
863 dc333cd6 Alexander Graf
/* Return the host timebase frequency as reported by /proc/cpuinfo's
 * "timebase" line, falling back to get_ticks_per_sec() when the line
 * is missing or malformed. */
uint32_t kvmppc_get_tbfreq(void)
{
    uint32_t fallback = get_ticks_per_sec();
    char line[512];
    char *sep;

    if (read_cpuinfo("timebase", line, sizeof(line))) {
        return fallback;
    }

    sep = strchr(line, ':');
    if (!sep) {
        return fallback;
    }

    /* value follows the colon, e.g. "timebase : 512000000" */
    return atoi(sep + 1);
}
882 4513d923 Gleb Natapov
883 eadaada1 Alexander Graf
/* Try to find a device tree node for a CPU with clock-frequency property */
884 eadaada1 Alexander Graf
static int kvmppc_find_cpu_dt(char *buf, int buf_len)
885 eadaada1 Alexander Graf
{
886 eadaada1 Alexander Graf
    struct dirent *dirp;
887 eadaada1 Alexander Graf
    DIR *dp;
888 eadaada1 Alexander Graf
889 eadaada1 Alexander Graf
    if ((dp = opendir(PROC_DEVTREE_CPU)) == NULL) {
890 eadaada1 Alexander Graf
        printf("Can't open directory " PROC_DEVTREE_CPU "\n");
891 eadaada1 Alexander Graf
        return -1;
892 eadaada1 Alexander Graf
    }
893 eadaada1 Alexander Graf
894 eadaada1 Alexander Graf
    buf[0] = '\0';
895 eadaada1 Alexander Graf
    while ((dirp = readdir(dp)) != NULL) {
896 eadaada1 Alexander Graf
        FILE *f;
897 eadaada1 Alexander Graf
        snprintf(buf, buf_len, "%s%s/clock-frequency", PROC_DEVTREE_CPU,
898 eadaada1 Alexander Graf
                 dirp->d_name);
899 eadaada1 Alexander Graf
        f = fopen(buf, "r");
900 eadaada1 Alexander Graf
        if (f) {
901 eadaada1 Alexander Graf
            snprintf(buf, buf_len, "%s%s", PROC_DEVTREE_CPU, dirp->d_name);
902 eadaada1 Alexander Graf
            fclose(f);
903 eadaada1 Alexander Graf
            break;
904 eadaada1 Alexander Graf
        }
905 eadaada1 Alexander Graf
        buf[0] = '\0';
906 eadaada1 Alexander Graf
    }
907 eadaada1 Alexander Graf
    closedir(dp);
908 eadaada1 Alexander Graf
    if (buf[0] == '\0') {
909 eadaada1 Alexander Graf
        printf("Unknown host!\n");
910 eadaada1 Alexander Graf
        return -1;
911 eadaada1 Alexander Graf
    }
912 eadaada1 Alexander Graf
913 eadaada1 Alexander Graf
    return 0;
914 eadaada1 Alexander Graf
}
915 eadaada1 Alexander Graf
916 9bc884b7 David Gibson
/* Read a CPU node property from the host device tree that's a single
917 9bc884b7 David Gibson
 * integer (32-bit or 64-bit).  Returns 0 if anything goes wrong
918 9bc884b7 David Gibson
 * (can't find or open the property, or doesn't understand the
919 9bc884b7 David Gibson
 * format) */
920 9bc884b7 David Gibson
static uint64_t kvmppc_read_int_cpu_dt(const char *propname)
921 eadaada1 Alexander Graf
{
922 9bc884b7 David Gibson
    char buf[PATH_MAX];
923 9bc884b7 David Gibson
    union {
924 9bc884b7 David Gibson
        uint32_t v32;
925 9bc884b7 David Gibson
        uint64_t v64;
926 9bc884b7 David Gibson
    } u;
927 eadaada1 Alexander Graf
    FILE *f;
928 eadaada1 Alexander Graf
    int len;
929 eadaada1 Alexander Graf
930 eadaada1 Alexander Graf
    if (kvmppc_find_cpu_dt(buf, sizeof(buf))) {
931 9bc884b7 David Gibson
        return -1;
932 eadaada1 Alexander Graf
    }
933 eadaada1 Alexander Graf
934 9bc884b7 David Gibson
    strncat(buf, "/", sizeof(buf) - strlen(buf));
935 9bc884b7 David Gibson
    strncat(buf, propname, sizeof(buf) - strlen(buf));
936 eadaada1 Alexander Graf
937 eadaada1 Alexander Graf
    f = fopen(buf, "rb");
938 eadaada1 Alexander Graf
    if (!f) {
939 eadaada1 Alexander Graf
        return -1;
940 eadaada1 Alexander Graf
    }
941 eadaada1 Alexander Graf
942 9bc884b7 David Gibson
    len = fread(&u, 1, sizeof(u), f);
943 eadaada1 Alexander Graf
    fclose(f);
944 eadaada1 Alexander Graf
    switch (len) {
945 9bc884b7 David Gibson
    case 4:
946 9bc884b7 David Gibson
        /* property is a 32-bit quantity */
947 9bc884b7 David Gibson
        return be32_to_cpu(u.v32);
948 9bc884b7 David Gibson
    case 8:
949 9bc884b7 David Gibson
        return be64_to_cpu(u.v64);
950 eadaada1 Alexander Graf
    }
951 eadaada1 Alexander Graf
952 eadaada1 Alexander Graf
    return 0;
953 eadaada1 Alexander Graf
}
954 eadaada1 Alexander Graf
955 9bc884b7 David Gibson
/* Host CPU clock frequency from the device tree (0/-1 on failure). */
uint64_t kvmppc_get_clockfreq(void)
{
    return kvmppc_read_int_cpu_dt("clock-frequency");
}
959 9bc884b7 David Gibson
960 6659394f David Gibson
/* Host "ibm,vmx" (Altivec/VSX level) property from the device tree. */
uint32_t kvmppc_get_vmx(void)
{
    return kvmppc_read_int_cpu_dt("ibm,vmx");
}
964 6659394f David Gibson
965 6659394f David Gibson
/* Host "ibm,dfp" (decimal floating point) property from the device tree. */
uint32_t kvmppc_get_dfp(void)
{
    return kvmppc_read_int_cpu_dt("ibm,dfp");
}
969 6659394f David Gibson
970 1328c2bf Andreas Färber
/* Fill 'buf' with the hypercall instruction sequence the guest should
 * use, as reported by KVM_PPC_GET_PVINFO; when the kernel doesn't
 * provide one, emit a stub that always fails (li r3,-1; 3x nop).
 * NOTE(review): both paths assume buf_len matches the 16-byte pvinfo
 * hcall area — confirm against callers.  Always returns 0. */
int kvmppc_get_hypercall(CPUPPCState *env, uint8_t *buf, int buf_len)
{
    struct kvm_ppc_pvinfo pvinfo;
    uint32_t *insns = (uint32_t *)buf;

    if (kvm_check_extension(env->kvm_state, KVM_CAP_PPC_GET_PVINFO) &&
        !kvm_vm_ioctl(env->kvm_state, KVM_PPC_GET_PVINFO, &pvinfo)) {
        memcpy(buf, pvinfo.hcall, buf_len);
        return 0;
    }

    /*
     * Fallback to always fail hypercalls:
     *
     *     li r3, -1
     *     nop
     *     nop
     *     nop
     */
    insns[0] = 0x3860ffff;
    insns[1] = 0x60000000;
    insns[2] = 0x60000000;
    insns[3] = 0x60000000;

    return 0;
}
999 45024f09 Alexander Graf
1000 1328c2bf Andreas Färber
/* Put the vcpu into PAPR (pseries paravirt) mode via KVM_ENABLE_CAP.
 * Aborts the CPU if the running kernel lacks KVM_CAP_PPC_PAPR. */
void kvmppc_set_papr(CPUPPCState *env)
{
    struct kvm_enable_cap cap = {
        .cap = KVM_CAP_PPC_PAPR,
    };

    if (kvm_vcpu_ioctl(env, KVM_ENABLE_CAP, &cap)) {
        cpu_abort(env, "This KVM version does not support PAPR\n");
    }
}
1012 f61b4bed Alexander Graf
1013 e97c3636 David Gibson
int kvmppc_smt_threads(void)
1014 e97c3636 David Gibson
{
1015 e97c3636 David Gibson
    return cap_ppc_smt ? cap_ppc_smt : 1;
1016 e97c3636 David Gibson
}
1017 e97c3636 David Gibson
1018 7f763a5d David Gibson
#ifdef TARGET_PPC64
1019 354ac20a David Gibson
/* Allocate a contiguous Real Mode Area from the kernel (HV KVM on
 * hardware that requires it), map it, and register it as a RAM
 * memory region at guest physical 0.
 * Returns the RMA size on success, 0 when no contiguous RMA is
 * needed, or -1 on failure. */
off_t kvmppc_alloc_rma(const char *name, MemoryRegion *sysmem)
{
    void *rma;
    off_t size;
    int fd;
    struct kvm_allocate_rma ret;
    MemoryRegion *rma_region;

    /* If cap_ppc_rma == 0, contiguous RMA allocation is not supported
     * if cap_ppc_rma == 1, contiguous RMA allocation is supported, but
     *                      not necessary on this hardware
     * if cap_ppc_rma == 2, contiguous RMA allocation is needed on this hardware
     *
     * FIXME: We should allow the user to force contiguous RMA
     * allocation in the cap_ppc_rma==1 case.
     */
    if (cap_ppc_rma < 2) {
        return 0;
    }

    fd = kvm_vm_ioctl(kvm_state, KVM_ALLOCATE_RMA, &ret);
    if (fd < 0) {
        fprintf(stderr, "KVM: Error on KVM_ALLOCATE_RMA: %s\n",
                strerror(errno));
        return -1;
    }

    /* Cap the mapping at 256MiB even if the kernel offers more */
    size = MIN(ret.rma_size, 256ul << 20);

    rma = mmap(NULL, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
    if (rma == MAP_FAILED) {
        fprintf(stderr, "KVM: Error mapping RMA: %s\n", strerror(errno));
        close(fd);  /* don't leak the RMA fd on the failure path */
        return -1;
    }

    rma_region = g_new(MemoryRegion, 1);
    memory_region_init_ram_ptr(rma_region, name, size, rma);
    vmstate_register_ram_global(rma_region);
    memory_region_add_subregion(sysmem, 0, rma_region);

    return size;
}
1061 354ac20a David Gibson
1062 7f763a5d David Gibson
uint64_t kvmppc_rma_size(uint64_t current_size, unsigned int hash_shift)
1063 7f763a5d David Gibson
{
1064 7f763a5d David Gibson
    if (cap_ppc_rma >= 2) {
1065 7f763a5d David Gibson
        return current_size;
1066 7f763a5d David Gibson
    }
1067 7f763a5d David Gibson
    return MIN(current_size,
1068 7f763a5d David Gibson
               getrampagesize() << (hash_shift - 7));
1069 7f763a5d David Gibson
}
1070 7f763a5d David Gibson
#endif
1071 7f763a5d David Gibson
1072 0f5cb298 David Gibson
void *kvmppc_create_spapr_tce(uint32_t liobn, uint32_t window_size, int *pfd)
1073 0f5cb298 David Gibson
{
1074 0f5cb298 David Gibson
    struct kvm_create_spapr_tce args = {
1075 0f5cb298 David Gibson
        .liobn = liobn,
1076 0f5cb298 David Gibson
        .window_size = window_size,
1077 0f5cb298 David Gibson
    };
1078 0f5cb298 David Gibson
    long len;
1079 0f5cb298 David Gibson
    int fd;
1080 0f5cb298 David Gibson
    void *table;
1081 0f5cb298 David Gibson
1082 b5aec396 David Gibson
    /* Must set fd to -1 so we don't try to munmap when called for
1083 b5aec396 David Gibson
     * destroying the table, which the upper layers -will- do
1084 b5aec396 David Gibson
     */
1085 b5aec396 David Gibson
    *pfd = -1;
1086 0f5cb298 David Gibson
    if (!cap_spapr_tce) {
1087 0f5cb298 David Gibson
        return NULL;
1088 0f5cb298 David Gibson
    }
1089 0f5cb298 David Gibson
1090 0f5cb298 David Gibson
    fd = kvm_vm_ioctl(kvm_state, KVM_CREATE_SPAPR_TCE, &args);
1091 0f5cb298 David Gibson
    if (fd < 0) {
1092 b5aec396 David Gibson
        fprintf(stderr, "KVM: Failed to create TCE table for liobn 0x%x\n",
1093 b5aec396 David Gibson
                liobn);
1094 0f5cb298 David Gibson
        return NULL;
1095 0f5cb298 David Gibson
    }
1096 0f5cb298 David Gibson
1097 ad0ebb91 David Gibson
    len = (window_size / SPAPR_TCE_PAGE_SIZE) * sizeof(sPAPRTCE);
1098 0f5cb298 David Gibson
    /* FIXME: round this up to page size */
1099 0f5cb298 David Gibson
1100 74b41e56 David Gibson
    table = mmap(NULL, len, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
1101 0f5cb298 David Gibson
    if (table == MAP_FAILED) {
1102 b5aec396 David Gibson
        fprintf(stderr, "KVM: Failed to map TCE table for liobn 0x%x\n",
1103 b5aec396 David Gibson
                liobn);
1104 0f5cb298 David Gibson
        close(fd);
1105 0f5cb298 David Gibson
        return NULL;
1106 0f5cb298 David Gibson
    }
1107 0f5cb298 David Gibson
1108 0f5cb298 David Gibson
    *pfd = fd;
1109 0f5cb298 David Gibson
    return table;
1110 0f5cb298 David Gibson
}
1111 0f5cb298 David Gibson
1112 0f5cb298 David Gibson
int kvmppc_remove_spapr_tce(void *table, int fd, uint32_t window_size)
1113 0f5cb298 David Gibson
{
1114 0f5cb298 David Gibson
    long len;
1115 0f5cb298 David Gibson
1116 0f5cb298 David Gibson
    if (fd < 0) {
1117 0f5cb298 David Gibson
        return -1;
1118 0f5cb298 David Gibson
    }
1119 0f5cb298 David Gibson
1120 ad0ebb91 David Gibson
    len = (window_size / SPAPR_TCE_PAGE_SIZE)*sizeof(sPAPRTCE);
1121 0f5cb298 David Gibson
    if ((munmap(table, len) < 0) ||
1122 0f5cb298 David Gibson
        (close(fd) < 0)) {
1123 b5aec396 David Gibson
        fprintf(stderr, "KVM: Unexpected error removing TCE table: %s",
1124 b5aec396 David Gibson
                strerror(errno));
1125 0f5cb298 David Gibson
        /* Leak the table */
1126 0f5cb298 David Gibson
    }
1127 0f5cb298 David Gibson
1128 0f5cb298 David Gibson
    return 0;
1129 0f5cb298 David Gibson
}
1130 0f5cb298 David Gibson
1131 7f763a5d David Gibson
int kvmppc_reset_htab(int shift_hint)
1132 7f763a5d David Gibson
{
1133 7f763a5d David Gibson
    uint32_t shift = shift_hint;
1134 7f763a5d David Gibson
1135 ace9a2cb David Gibson
    if (!kvm_enabled()) {
1136 ace9a2cb David Gibson
        /* Full emulation, tell caller to allocate htab itself */
1137 ace9a2cb David Gibson
        return 0;
1138 ace9a2cb David Gibson
    }
1139 ace9a2cb David Gibson
    if (kvm_check_extension(kvm_state, KVM_CAP_PPC_ALLOC_HTAB)) {
1140 7f763a5d David Gibson
        int ret;
1141 7f763a5d David Gibson
        ret = kvm_vm_ioctl(kvm_state, KVM_PPC_ALLOCATE_HTAB, &shift);
1142 ace9a2cb David Gibson
        if (ret == -ENOTTY) {
1143 ace9a2cb David Gibson
            /* At least some versions of PR KVM advertise the
1144 ace9a2cb David Gibson
             * capability, but don't implement the ioctl().  Oops.
1145 ace9a2cb David Gibson
             * Return 0 so that we allocate the htab in qemu, as is
1146 ace9a2cb David Gibson
             * correct for PR. */
1147 ace9a2cb David Gibson
            return 0;
1148 ace9a2cb David Gibson
        } else if (ret < 0) {
1149 7f763a5d David Gibson
            return ret;
1150 7f763a5d David Gibson
        }
1151 7f763a5d David Gibson
        return shift;
1152 7f763a5d David Gibson
    }
1153 7f763a5d David Gibson
1154 ace9a2cb David Gibson
    /* We have a kernel that predates the htab reset calls.  For PR
1155 ace9a2cb David Gibson
     * KVM, we need to allocate the htab ourselves, for an HV KVM of
1156 ace9a2cb David Gibson
     * this era, it has allocated a 16MB fixed size hash table
1157 ace9a2cb David Gibson
     * already.  Kernels of this era have the GET_PVINFO capability
1158 ace9a2cb David Gibson
     * only on PR, so we use this hack to determine the right
1159 ace9a2cb David Gibson
     * answer */
1160 ace9a2cb David Gibson
    if (kvm_check_extension(kvm_state, KVM_CAP_PPC_GET_PVINFO)) {
1161 ace9a2cb David Gibson
        /* PR - tell caller to allocate htab */
1162 ace9a2cb David Gibson
        return 0;
1163 ace9a2cb David Gibson
    } else {
1164 ace9a2cb David Gibson
        /* HV - assume 16MB kernel allocated htab */
1165 ace9a2cb David Gibson
        return 24;
1166 ace9a2cb David Gibson
    }
1167 7f763a5d David Gibson
}
1168 7f763a5d David Gibson
1169 a1e98583 David Gibson
/* Read the host CPU's Processor Version Register via the PowerPC
 * "mfpvr" instruction.  Host-only helper: this compiles/runs only on a
 * ppc host, which is the only place KVM-PPC is usable anyway. */
static inline uint32_t mfpvr(void)
{
    uint32_t pvr;

    asm ("mfpvr %0"
         : "=r"(pvr));
    return pvr;
}
1177 a1e98583 David Gibson
1178 a7342588 David Gibson
/* Set (on == true) or clear (on == false) the given flag bits in an
 * instruction-feature word. */
static void alter_insns(uint64_t *word, uint64_t flags, bool on)
{
    *word = on ? (*word | flags) : (*word & ~flags);
}
1186 a7342588 David Gibson
1187 a1e98583 David Gibson
const ppc_def_t *kvmppc_host_cpu_def(void)
1188 a1e98583 David Gibson
{
1189 a1e98583 David Gibson
    uint32_t host_pvr = mfpvr();
1190 a1e98583 David Gibson
    const ppc_def_t *base_spec;
1191 a7342588 David Gibson
    ppc_def_t *spec;
1192 a7342588 David Gibson
    uint32_t vmx = kvmppc_get_vmx();
1193 a7342588 David Gibson
    uint32_t dfp = kvmppc_get_dfp();
1194 a1e98583 David Gibson
1195 a1e98583 David Gibson
    base_spec = ppc_find_by_pvr(host_pvr);
1196 a1e98583 David Gibson
1197 a7342588 David Gibson
    spec = g_malloc0(sizeof(*spec));
1198 a7342588 David Gibson
    memcpy(spec, base_spec, sizeof(*spec));
1199 a7342588 David Gibson
1200 a7342588 David Gibson
    /* Now fix up the spec with information we can query from the host */
1201 a7342588 David Gibson
1202 70bca53f Alexander Graf
    if (vmx != -1) {
1203 70bca53f Alexander Graf
        /* Only override when we know what the host supports */
1204 70bca53f Alexander Graf
        alter_insns(&spec->insns_flags, PPC_ALTIVEC, vmx > 0);
1205 70bca53f Alexander Graf
        alter_insns(&spec->insns_flags2, PPC2_VSX, vmx > 1);
1206 70bca53f Alexander Graf
    }
1207 70bca53f Alexander Graf
    if (dfp != -1) {
1208 70bca53f Alexander Graf
        /* Only override when we know what the host supports */
1209 70bca53f Alexander Graf
        alter_insns(&spec->insns_flags2, PPC2_DFP, dfp);
1210 70bca53f Alexander Graf
    }
1211 a7342588 David Gibson
1212 a7342588 David Gibson
    return spec;
1213 a1e98583 David Gibson
}
1214 a1e98583 David Gibson
1215 12b1143b David Gibson
/* Adjust this vcpu's cpu_index for SMT: the kernel numbers vcpus with
 * kvmppc_smt_threads() slots per core, while qemu packs them
 * smp_threads per core, so renumber from qemu's layout to the
 * kernel's.  Always returns 0. */
int kvmppc_fixup_cpu(CPUPPCState *env)
{
    int smt = kvmppc_smt_threads();
    int core = env->cpu_index / smp_threads;
    int thread = env->cpu_index % smp_threads;

    env->cpu_index = core * smt + thread;

    return 0;
}
1226 12b1143b David Gibson
1227 12b1143b David Gibson
1228 1328c2bf Andreas Färber
/* Always tell generic KVM code to stop the guest when instruction
 * emulation fails, rather than attempting to continue. */
bool kvm_arch_stop_on_emulation_error(CPUPPCState *env)
{
    return true;
}
1232 a1b87fe0 Jan Kiszka
1233 1328c2bf Andreas Färber
/* Per-vcpu SIGBUS hook (e.g. on a hardware memory error).  Not handled
 * on ppc: return non-zero so the generic code takes its default path. */
int kvm_arch_on_sigbus_vcpu(CPUPPCState *env, int code, void *addr)
{
    return 1;
}
1237 a1b87fe0 Jan Kiszka
1238 a1b87fe0 Jan Kiszka
/* Process-wide SIGBUS hook.  Not handled on ppc: return non-zero so
 * the generic code takes its default path. */
int kvm_arch_on_sigbus(int code, void *addr)
{
    return 1;
}