target-ppc/kvm.c @ 60925d26
/*
 * PowerPC implementation of KVM hooks
 *
 * Copyright IBM Corp. 2007
 * Copyright (C) 2011 Freescale Semiconductor, Inc.
 *
 * Authors:
 *  Jerone Young <jyoung5@us.ibm.com>
 *  Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 *  Hollis Blanchard <hollisb@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include <dirent.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/vfs.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "qemu/timer.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
#include "kvm_ppc.h"
#include "cpu.h"
#include "sysemu/cpus.h"
#include "sysemu/device_tree.h"
#include "hw/sysbus.h"
#include "hw/spapr.h"

#include "hw/sysbus.h"
#include "hw/spapr.h"
#include "hw/spapr_vio.h"

//#define DEBUG_KVM

#ifdef DEBUG_KVM
#define dprintf(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define dprintf(fmt, ...) \
    do { } while (0)
#endif

#define PROC_DEVTREE_CPU      "/proc/device-tree/cpus/"

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_LAST_INFO
};

static int cap_interrupt_unset = false;
static int cap_interrupt_level = false;
static int cap_segstate;
static int cap_booke_sregs;
static int cap_ppc_smt;
static int cap_ppc_rma;
static int cap_spapr_tce;
static int cap_hior;

/* XXX We have a race condition where we actually have a level triggered
 *     interrupt, but the infrastructure can't expose that yet, so the guest
 *     takes but ignores it, goes to sleep and never gets notified that there's
 *     still an interrupt pending.
 *
 *     As a quick workaround, let's just wake up again 20 ms after we injected
 *     an interrupt. That way we can assure that we're always reinjecting
 *     interrupts in case the guest swallowed them.
 */
static QEMUTimer *idle_timer;

static void kvm_kick_cpu(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    qemu_cpu_kick(CPU(cpu));
}

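/* Probe the optional KVM capabilities once, when the accelerator is
 * initialized */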
int kvm_arch_init(KVMState *s)
{
    cap_interrupt_unset = kvm_check_extension(s, KVM_CAP_PPC_UNSET_IRQ);
    cap_interrupt_level = kvm_check_extension(s, KVM_CAP_PPC_IRQ_LEVEL);
    cap_segstate = kvm_check_extension(s, KVM_CAP_PPC_SEGSTATE);
    cap_booke_sregs = kvm_check_extension(s, KVM_CAP_PPC_BOOKE_SREGS);
    cap_ppc_smt = kvm_check_extension(s, KVM_CAP_PPC_SMT);
    cap_ppc_rma = kvm_check_extension(s, KVM_CAP_PPC_RMA);
    cap_spapr_tce = kvm_check_extension(s, KVM_CAP_SPAPR_TCE);
    cap_hior = kvm_check_extension(s, KVM_CAP_PPC_HIOR);

    if (!cap_interrupt_level) {
        fprintf(stderr, "KVM: Couldn't find level irq capability. Expect the "
                        "VM to stall at times!\n");
    }

    return 0;
}

static int kvm_arch_sync_sregs(PowerPCCPU *cpu)
{
    CPUPPCState *cenv = &cpu->env;
    CPUState *cs = CPU(cpu);
    struct kvm_sregs sregs;
    int ret;

    if (cenv->excp_model == POWERPC_EXCP_BOOKE) {
        /* What we're really trying to say is "if we're on BookE, we use
           the native PVR for now". This is the only sane way to check
           it though, so users may be confused into thinking they can run
           BookE guests on BookS. Let's hope nobody dares enough :) */
        return 0;
    } else {
        if (!cap_segstate) {
            fprintf(stderr, "kvm error: missing PVR setting capability\n");
            return -ENOSYS;
        }
    }

    ret = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs);
    if (ret) {
        return ret;
    }

    sregs.pvr = cenv->spr[SPR_PVR];
    return kvm_vcpu_ioctl(cs, KVM_SET_SREGS, &sregs);
}

/* Set up a shared TLB array with KVM */
static int kvm_booke206_tlb_init(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    struct kvm_book3e_206_tlb_params params = {};
    struct kvm_config_tlb cfg = {};
    struct kvm_enable_cap encap = {};
    unsigned int entries = 0;
    int ret, i;

    if (!kvm_enabled() ||
        !kvm_check_extension(cs->kvm_state, KVM_CAP_SW_TLB)) {
        return 0;
    }

    assert(ARRAY_SIZE(params.tlb_sizes) == BOOKE206_MAX_TLBN);

    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        params.tlb_sizes[i] = booke206_tlb_size(env, i);
        params.tlb_ways[i] = booke206_tlb_ways(env, i);
        entries += params.tlb_sizes[i];
    }

    assert(entries == env->nb_tlb);
    assert(sizeof(struct kvm_book3e_206_tlb_entry) == sizeof(ppcmas_tlb_t));

    env->tlb_dirty = true;

    cfg.array = (uintptr_t)env->tlb.tlbm;
    cfg.array_len = sizeof(ppcmas_tlb_t) * entries;
    cfg.params = (uintptr_t)&params;
    cfg.mmu_type = KVM_MMU_FSL_BOOKE_NOHV;

    encap.cap = KVM_CAP_SW_TLB;
    encap.args[0] = (uintptr_t)&cfg;

    ret = kvm_vcpu_ioctl(cs, KVM_ENABLE_CAP, &encap);
    if (ret < 0) {
        fprintf(stderr, "%s: couldn't enable KVM_CAP_SW_TLB: %s\n",
                __func__, strerror(-ret));
        return ret;
    }

    env->kvm_sw_tlb = true;
    return 0;
}


#if defined(TARGET_PPC64)
static void kvm_get_fallback_smmu_info(PowerPCCPU *cpu,
                                       struct kvm_ppc_smmu_info *info)
{
    CPUPPCState *env = &cpu->env;
    CPUState *cs = CPU(cpu);

    memset(info, 0, sizeof(*info));

    /* We don't have the new KVM_PPC_GET_SMMU_INFO ioctl, so
     * need to "guess" what the supported page sizes are.
     *
     * For that to work we make a few assumptions:
     *
     * - If KVM_CAP_PPC_GET_PVINFO is supported we are running "PR"
     *   KVM which only supports 4K and 16M pages, but supports them
     *   regardless of the backing store characteristics. We also don't
     *   support 1T segments.
     *
     *   This is safe as if HV KVM ever supports that capability or PR
     *   KVM grows support for more page/segment sizes, those versions
     *   will have implemented KVM_CAP_PPC_GET_SMMU_INFO and thus we
     *   will not hit this fallback.
     *
     * - Else we are running HV KVM. This means we only support page
     *   sizes that fit in the backing store. Additionally we only
     *   advertise 64K pages if the processor is ARCH 2.06 and we assume
     *   P7 encodings for the SLB and hash table. Here too, we assume
     *   support for any newer processor will mean a kernel that
     *   implements KVM_CAP_PPC_GET_SMMU_INFO and thus doesn't hit
     *   this fallback.
     */
    if (kvm_check_extension(cs->kvm_state, KVM_CAP_PPC_GET_PVINFO)) {
        /* No flags */
        info->flags = 0;
        info->slb_size = 64;

        /* Standard 4k base page size segment */
        info->sps[0].page_shift = 12;
        info->sps[0].slb_enc = 0;
        info->sps[0].enc[0].page_shift = 12;
        info->sps[0].enc[0].pte_enc = 0;

        /* Standard 16M large page size segment */
        info->sps[1].page_shift = 24;
        info->sps[1].slb_enc = SLB_VSID_L;
        info->sps[1].enc[0].page_shift = 24;
        info->sps[1].enc[0].pte_enc = 0;
    } else {
        int i = 0;

        /* HV KVM has backing store size restrictions */
        info->flags = KVM_PPC_PAGE_SIZES_REAL;

        if (env->mmu_model & POWERPC_MMU_1TSEG) {
            info->flags |= KVM_PPC_1T_SEGMENTS;
        }

        if (env->mmu_model == POWERPC_MMU_2_06) {
            info->slb_size = 32;
        } else {
            info->slb_size = 64;
        }

        /* Standard 4k base page size segment */
        info->sps[i].page_shift = 12;
        info->sps[i].slb_enc = 0;
        info->sps[i].enc[0].page_shift = 12;
        info->sps[i].enc[0].pte_enc = 0;
        i++;

        /* 64K on MMU 2.06 */
        if (env->mmu_model == POWERPC_MMU_2_06) {
            info->sps[i].page_shift = 16;
            info->sps[i].slb_enc = 0x110;
            info->sps[i].enc[0].page_shift = 16;
            info->sps[i].enc[0].pte_enc = 1;
            i++;
        }

        /* Standard 16M large page size segment */
        info->sps[i].page_shift = 24;
        info->sps[i].slb_enc = SLB_VSID_L;
        info->sps[i].enc[0].page_shift = 24;
        info->sps[i].enc[0].pte_enc = 0;
    }
}

static void kvm_get_smmu_info(PowerPCCPU *cpu, struct kvm_ppc_smmu_info *info)
{
    CPUState *cs = CPU(cpu);
    int ret;

    if (kvm_check_extension(cs->kvm_state, KVM_CAP_PPC_GET_SMMU_INFO)) {
        ret = kvm_vm_ioctl(cs->kvm_state, KVM_PPC_GET_SMMU_INFO, info);
        if (ret == 0) {
            return;
        }
    }

    kvm_get_fallback_smmu_info(cpu, info);
}

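/* Page size backing guest RAM: the host page size for anonymous memory, or
 * the hugepage size when -mem-path points at a hugetlbfs mount */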
static long getrampagesize(void)
{
    struct statfs fs;
    int ret;

    if (!mem_path) {
        /* guest RAM is backed by normal anonymous pages */
        return getpagesize();
    }

    do {
        ret = statfs(mem_path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        fprintf(stderr, "Couldn't statfs() memory path: %s\n",
                strerror(errno));
        exit(1);
    }

#define HUGETLBFS_MAGIC       0x958458f6

    if (fs.f_type != HUGETLBFS_MAGIC) {
        /* Explicit mempath, but it's ordinary pages */
        return getpagesize();
    }

    /* It's hugepage, return the huge page size */
    return fs.f_bsize;
}

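/* Under HV KVM (KVM_PPC_PAGE_SIZES_REAL) a page size is only usable if it is
 * no larger than the page size backing guest RAM */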
static bool kvm_valid_page_size(uint32_t flags, long rampgsize, uint32_t shift)
{
    if (!(flags & KVM_PPC_PAGE_SIZES_REAL)) {
        return true;
    }

    return (1ul << shift) <= rampgsize;
}

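/* Filter the page and segment sizes advertised to the guest down to those
 * supported by both the kernel and the RAM backing store */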
static void kvm_fixup_page_sizes(PowerPCCPU *cpu)
{
    static struct kvm_ppc_smmu_info smmu_info;
    static bool has_smmu_info;
    CPUPPCState *env = &cpu->env;
    long rampagesize;
    int iq, ik, jq, jk;

    /* We only handle page sizes for 64-bit server guests for now */
    if (!(env->mmu_model & POWERPC_MMU_64)) {
        return;
    }

    /* Collect MMU info from kernel if not already */
    if (!has_smmu_info) {
        kvm_get_smmu_info(cpu, &smmu_info);
        has_smmu_info = true;
    }

    rampagesize = getrampagesize();

    /* Convert to QEMU form */
    memset(&env->sps, 0, sizeof(env->sps));

    for (ik = iq = 0; ik < KVM_PPC_PAGE_SIZES_MAX_SZ; ik++) {
        struct ppc_one_seg_page_size *qsps = &env->sps.sps[iq];
        struct kvm_ppc_one_seg_page_size *ksps = &smmu_info.sps[ik];

        if (!kvm_valid_page_size(smmu_info.flags, rampagesize,
                                 ksps->page_shift)) {
            continue;
        }
        qsps->page_shift = ksps->page_shift;
        qsps->slb_enc = ksps->slb_enc;
        for (jk = jq = 0; jk < KVM_PPC_PAGE_SIZES_MAX_SZ; jk++) {
            if (!kvm_valid_page_size(smmu_info.flags, rampagesize,
                                     ksps->enc[jk].page_shift)) {
                continue;
            }
            qsps->enc[jq].page_shift = ksps->enc[jk].page_shift;
            qsps->enc[jq].pte_enc = ksps->enc[jk].pte_enc;
            if (++jq >= PPC_PAGE_SIZES_MAX_SZ) {
                break;
            }
        }
        if (++iq >= PPC_PAGE_SIZES_MAX_SZ) {
            break;
        }
    }
    env->slb_nr = smmu_info.slb_size;
    if (smmu_info.flags & KVM_PPC_1T_SEGMENTS) {
        env->mmu_model |= POWERPC_MMU_1TSEG;
    } else {
        env->mmu_model &= ~POWERPC_MMU_1TSEG;
    }
}
#else /* defined (TARGET_PPC64) */

static inline void kvm_fixup_page_sizes(PowerPCCPU *cpu)
{
}

#endif /* !defined (TARGET_PPC64) */

unsigned long kvm_arch_vcpu_id(CPUState *cpu)
{
    return cpu->cpu_index;
}

int kvm_arch_init_vcpu(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *cenv = &cpu->env;
    int ret;

    /* Gather server mmu info from KVM and update the CPU state */
    kvm_fixup_page_sizes(cpu);

    /* Synchronize sregs with kvm */
    ret = kvm_arch_sync_sregs(cpu);
    if (ret) {
        return ret;
    }

    idle_timer = qemu_new_timer_ns(vm_clock, kvm_kick_cpu, cpu);

    /* Some targets support access to KVM's guest TLB. */
    switch (cenv->mmu_model) {
    case POWERPC_MMU_BOOKE206:
        ret = kvm_booke206_tlb_init(cpu);
        break;
    default:
        break;
    }

    return ret;
}

void kvm_arch_reset_vcpu(CPUState *cpu)
{
}

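/* Push the complete shadow TLB array back to KVM by marking every entry
 * dirty, so the kernel picks up any changes made on the QEMU side */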
static void kvm_sw_tlb_put(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    struct kvm_dirty_tlb dirty_tlb;
    unsigned char *bitmap;
    int ret;

    if (!env->kvm_sw_tlb) {
        return;
    }

    bitmap = g_malloc((env->nb_tlb + 7) / 8);
    memset(bitmap, 0xFF, (env->nb_tlb + 7) / 8);

    dirty_tlb.bitmap = (uintptr_t)bitmap;
    dirty_tlb.num_dirty = env->nb_tlb;

    ret = kvm_vcpu_ioctl(cs, KVM_DIRTY_TLB, &dirty_tlb);
    if (ret) {
        fprintf(stderr, "%s: KVM_DIRTY_TLB: %s\n",
                __func__, strerror(-ret));
    }

    g_free(bitmap);
}

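/* Write QEMU's vcpu state into KVM: general registers always; sregs (SDR1,
 * SLB, SRs, BATs) and HIOR only for level >= KVM_PUT_RESET_STATE and when the
 * host advertises the corresponding capability */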
int kvm_arch_put_registers(CPUState *cs, int level)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    struct kvm_regs regs;
    int ret;
    int i;

    ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
    if (ret < 0) {
        return ret;
    }

    regs.ctr = env->ctr;
    regs.lr  = env->lr;
    regs.xer = env->xer;
    regs.msr = env->msr;
    regs.pc = env->nip;

    regs.srr0 = env->spr[SPR_SRR0];
    regs.srr1 = env->spr[SPR_SRR1];

    regs.sprg0 = env->spr[SPR_SPRG0];
    regs.sprg1 = env->spr[SPR_SPRG1];
    regs.sprg2 = env->spr[SPR_SPRG2];
    regs.sprg3 = env->spr[SPR_SPRG3];
    regs.sprg4 = env->spr[SPR_SPRG4];
    regs.sprg5 = env->spr[SPR_SPRG5];
    regs.sprg6 = env->spr[SPR_SPRG6];
    regs.sprg7 = env->spr[SPR_SPRG7];

    regs.pid = env->spr[SPR_BOOKE_PID];

    for (i = 0;i < 32; i++)
        regs.gpr[i] = env->gpr[i];

    ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);
    if (ret < 0)
        return ret;

    if (env->tlb_dirty) {
        kvm_sw_tlb_put(cpu);
        env->tlb_dirty = false;
    }

    if (cap_segstate && (level >= KVM_PUT_RESET_STATE)) {
        struct kvm_sregs sregs;

        sregs.pvr = env->spr[SPR_PVR];

        sregs.u.s.sdr1 = env->spr[SPR_SDR1];

        /* Sync SLB */
#ifdef TARGET_PPC64
        for (i = 0; i < 64; i++) {
            sregs.u.s.ppc64.slb[i].slbe = env->slb[i].esid;
            sregs.u.s.ppc64.slb[i].slbv = env->slb[i].vsid;
        }
#endif

        /* Sync SRs */
        for (i = 0; i < 16; i++) {
            sregs.u.s.ppc32.sr[i] = env->sr[i];
        }

        /* Sync BATs */
        for (i = 0; i < 8; i++) {
            /* Beware. We have to swap upper and lower bits here */
            sregs.u.s.ppc32.dbat[i] = ((uint64_t)env->DBAT[0][i] << 32)
                | env->DBAT[1][i];
            sregs.u.s.ppc32.ibat[i] = ((uint64_t)env->IBAT[0][i] << 32)
                | env->IBAT[1][i];
        }

        ret = kvm_vcpu_ioctl(cs, KVM_SET_SREGS, &sregs);
        if (ret) {
            return ret;
        }
    }

    if (cap_hior && (level >= KVM_PUT_RESET_STATE)) {
        uint64_t hior = env->spr[SPR_HIOR];
        struct kvm_one_reg reg = {
            .id = KVM_REG_PPC_HIOR,
            .addr = (uintptr_t) &hior,
        };

        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    return ret;
}

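/* Read the vcpu state back from KVM into QEMU's CPUPPCState, including the
 * BookE and book3s sregs blocks when the respective capabilities are set */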
int kvm_arch_get_registers(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    struct kvm_regs regs;
    struct kvm_sregs sregs;
    uint32_t cr;
    int i, ret;

    ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
    if (ret < 0)
        return ret;

    cr = regs.cr;
    for (i = 7; i >= 0; i--) {
        env->crf[i] = cr & 15;
        cr >>= 4;
    }

    env->ctr = regs.ctr;
    env->lr = regs.lr;
    env->xer = regs.xer;
    env->msr = regs.msr;
    env->nip = regs.pc;

    env->spr[SPR_SRR0] = regs.srr0;
    env->spr[SPR_SRR1] = regs.srr1;

    env->spr[SPR_SPRG0] = regs.sprg0;
    env->spr[SPR_SPRG1] = regs.sprg1;
    env->spr[SPR_SPRG2] = regs.sprg2;
    env->spr[SPR_SPRG3] = regs.sprg3;
    env->spr[SPR_SPRG4] = regs.sprg4;
    env->spr[SPR_SPRG5] = regs.sprg5;
    env->spr[SPR_SPRG6] = regs.sprg6;
    env->spr[SPR_SPRG7] = regs.sprg7;

    env->spr[SPR_BOOKE_PID] = regs.pid;

    for (i = 0;i < 32; i++)
        env->gpr[i] = regs.gpr[i];

    if (cap_booke_sregs) {
        ret = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs);
        if (ret < 0) {
            return ret;
        }

        if (sregs.u.e.features & KVM_SREGS_E_BASE) {
            env->spr[SPR_BOOKE_CSRR0] = sregs.u.e.csrr0;
            env->spr[SPR_BOOKE_CSRR1] = sregs.u.e.csrr1;
            env->spr[SPR_BOOKE_ESR] = sregs.u.e.esr;
            env->spr[SPR_BOOKE_DEAR] = sregs.u.e.dear;
            env->spr[SPR_BOOKE_MCSR] = sregs.u.e.mcsr;
            env->spr[SPR_BOOKE_TSR] = sregs.u.e.tsr;
            env->spr[SPR_BOOKE_TCR] = sregs.u.e.tcr;
            env->spr[SPR_DECR] = sregs.u.e.dec;
            env->spr[SPR_TBL] = sregs.u.e.tb & 0xffffffff;
            env->spr[SPR_TBU] = sregs.u.e.tb >> 32;
            env->spr[SPR_VRSAVE] = sregs.u.e.vrsave;
        }

        if (sregs.u.e.features & KVM_SREGS_E_ARCH206) {
            env->spr[SPR_BOOKE_PIR] = sregs.u.e.pir;
            env->spr[SPR_BOOKE_MCSRR0] = sregs.u.e.mcsrr0;
            env->spr[SPR_BOOKE_MCSRR1] = sregs.u.e.mcsrr1;
            env->spr[SPR_BOOKE_DECAR] = sregs.u.e.decar;
            env->spr[SPR_BOOKE_IVPR] = sregs.u.e.ivpr;
        }

        if (sregs.u.e.features & KVM_SREGS_E_64) {
            env->spr[SPR_BOOKE_EPCR] = sregs.u.e.epcr;
        }

        if (sregs.u.e.features & KVM_SREGS_E_SPRG8) {
            env->spr[SPR_BOOKE_SPRG8] = sregs.u.e.sprg8;
        }

        if (sregs.u.e.features & KVM_SREGS_E_IVOR) {
            env->spr[SPR_BOOKE_IVOR0] = sregs.u.e.ivor_low[0];
            env->spr[SPR_BOOKE_IVOR1] = sregs.u.e.ivor_low[1];
            env->spr[SPR_BOOKE_IVOR2] = sregs.u.e.ivor_low[2];
            env->spr[SPR_BOOKE_IVOR3] = sregs.u.e.ivor_low[3];
            env->spr[SPR_BOOKE_IVOR4] = sregs.u.e.ivor_low[4];
            env->spr[SPR_BOOKE_IVOR5] = sregs.u.e.ivor_low[5];
            env->spr[SPR_BOOKE_IVOR6] = sregs.u.e.ivor_low[6];
            env->spr[SPR_BOOKE_IVOR7] = sregs.u.e.ivor_low[7];
            env->spr[SPR_BOOKE_IVOR8] = sregs.u.e.ivor_low[8];
            env->spr[SPR_BOOKE_IVOR9] = sregs.u.e.ivor_low[9];
            env->spr[SPR_BOOKE_IVOR10] = sregs.u.e.ivor_low[10];
            env->spr[SPR_BOOKE_IVOR11] = sregs.u.e.ivor_low[11];
            env->spr[SPR_BOOKE_IVOR12] = sregs.u.e.ivor_low[12];
            env->spr[SPR_BOOKE_IVOR13] = sregs.u.e.ivor_low[13];
            env->spr[SPR_BOOKE_IVOR14] = sregs.u.e.ivor_low[14];
            env->spr[SPR_BOOKE_IVOR15] = sregs.u.e.ivor_low[15];

            if (sregs.u.e.features & KVM_SREGS_E_SPE) {
                env->spr[SPR_BOOKE_IVOR32] = sregs.u.e.ivor_high[0];
                env->spr[SPR_BOOKE_IVOR33] = sregs.u.e.ivor_high[1];
                env->spr[SPR_BOOKE_IVOR34] = sregs.u.e.ivor_high[2];
            }

            if (sregs.u.e.features & KVM_SREGS_E_PM) {
                env->spr[SPR_BOOKE_IVOR35] = sregs.u.e.ivor_high[3];
            }

            if (sregs.u.e.features & KVM_SREGS_E_PC) {
                env->spr[SPR_BOOKE_IVOR36] = sregs.u.e.ivor_high[4];
                env->spr[SPR_BOOKE_IVOR37] = sregs.u.e.ivor_high[5];
            }
        }

        if (sregs.u.e.features & KVM_SREGS_E_ARCH206_MMU) {
            env->spr[SPR_BOOKE_MAS0] = sregs.u.e.mas0;
            env->spr[SPR_BOOKE_MAS1] = sregs.u.e.mas1;
            env->spr[SPR_BOOKE_MAS2] = sregs.u.e.mas2;
            env->spr[SPR_BOOKE_MAS3] = sregs.u.e.mas7_3 & 0xffffffff;
            env->spr[SPR_BOOKE_MAS4] = sregs.u.e.mas4;
            env->spr[SPR_BOOKE_MAS6] = sregs.u.e.mas6;
            env->spr[SPR_BOOKE_MAS7] = sregs.u.e.mas7_3 >> 32;
            env->spr[SPR_MMUCFG] = sregs.u.e.mmucfg;
            env->spr[SPR_BOOKE_TLB0CFG] = sregs.u.e.tlbcfg[0];
            env->spr[SPR_BOOKE_TLB1CFG] = sregs.u.e.tlbcfg[1];
        }

        if (sregs.u.e.features & KVM_SREGS_EXP) {
            env->spr[SPR_BOOKE_EPR] = sregs.u.e.epr;
        }

        if (sregs.u.e.features & KVM_SREGS_E_PD) {
            env->spr[SPR_BOOKE_EPLC] = sregs.u.e.eplc;
            env->spr[SPR_BOOKE_EPSC] = sregs.u.e.epsc;
        }

        if (sregs.u.e.impl_id == KVM_SREGS_E_IMPL_FSL) {
            env->spr[SPR_E500_SVR] = sregs.u.e.impl.fsl.svr;
            env->spr[SPR_Exxx_MCAR] = sregs.u.e.impl.fsl.mcar;
            env->spr[SPR_HID0] = sregs.u.e.impl.fsl.hid0;

            if (sregs.u.e.impl.fsl.features & KVM_SREGS_E_FSL_PIDn) {
                env->spr[SPR_BOOKE_PID1] = sregs.u.e.impl.fsl.pid1;
                env->spr[SPR_BOOKE_PID2] = sregs.u.e.impl.fsl.pid2;
            }
        }
    }

    if (cap_segstate) {
        ret = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs);
        if (ret < 0) {
            return ret;
        }

        ppc_store_sdr1(env, sregs.u.s.sdr1);

        /* Sync SLB */
#ifdef TARGET_PPC64
        for (i = 0; i < 64; i++) {
            ppc_store_slb(env, sregs.u.s.ppc64.slb[i].slbe,
                               sregs.u.s.ppc64.slb[i].slbv);
        }
#endif

        /* Sync SRs */
        for (i = 0; i < 16; i++) {
            env->sr[i] = sregs.u.s.ppc32.sr[i];
        }

        /* Sync BATs */
        for (i = 0; i < 8; i++) {
            env->DBAT[0][i] = sregs.u.s.ppc32.dbat[i] & 0xffffffff;
            env->DBAT[1][i] = sregs.u.s.ppc32.dbat[i] >> 32;
            env->IBAT[0][i] = sregs.u.s.ppc32.ibat[i] & 0xffffffff;
            env->IBAT[1][i] = sregs.u.s.ppc32.ibat[i] >> 32;
        }
    }

    return 0;
}

int kvmppc_set_interrupt(PowerPCCPU *cpu, int irq, int level)
{
    unsigned virq = level ? KVM_INTERRUPT_SET_LEVEL : KVM_INTERRUPT_UNSET;

    if (irq != PPC_INTERRUPT_EXT) {
        return 0;
    }

    if (!kvm_enabled() || !cap_interrupt_unset || !cap_interrupt_level) {
        return 0;
    }

    kvm_vcpu_ioctl(CPU(cpu), KVM_INTERRUPT, &virq);

    return 0;
}

#if defined(TARGET_PPCEMB)
#define PPC_INPUT_INT PPC40x_INPUT_INT
#elif defined(TARGET_PPC64)
#define PPC_INPUT_INT PPC970_INPUT_INT
#else
#define PPC_INPUT_INT PPC6xx_INPUT_INT
#endif

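/* Before entering the guest, inject a pending external interrupt by hand for
 * kernels without KVM_CAP_PPC_IRQ_LEVEL, and re-arm the wakeup timer so a
 * level-triggered interrupt the guest ignores gets retried */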
void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    int r;
    unsigned irq;

    /* PowerPC QEMU tracks the various core input pins (interrupt, critical
     * interrupt, reset, etc) in PPC-specific env->irq_input_state. */
    if (!cap_interrupt_level &&
        run->ready_for_interrupt_injection &&
        (env->interrupt_request & CPU_INTERRUPT_HARD) &&
        (env->irq_input_state & (1<<PPC_INPUT_INT)))
    {
        /* For now KVM disregards the 'irq' argument. However, in the
         * future KVM could cache it in-kernel to avoid a heavyweight exit
         * when reading the UIC.
         */
        irq = KVM_INTERRUPT_SET;

        dprintf("injected interrupt %d\n", irq);
        r = kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &irq);
        if (r < 0) {
            printf("cpu %d fail inject %x\n", cs->cpu_index, irq);
        }

        /* Always wake up soon in case the interrupt was level based */
        qemu_mod_timer(idle_timer, qemu_get_clock_ns(vm_clock) +
                       (get_ticks_per_sec() / 50));
    }

    /* We don't know if there are more interrupts pending after this. However,
     * the guest will return to userspace in the course of handling this one
     * anyways, so we will get a chance to deliver the rest. */
}

void kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
{
}

int kvm_arch_process_async_events(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    return cpu->env.halted;
}

static int kvmppc_handle_halt(CPUPPCState *env)
{
    if (!(env->interrupt_request & CPU_INTERRUPT_HARD) && (msr_ee)) {
        env->halted = 1;
        env->exception_index = EXCP_HLT;
    }

    return 0;
}

/* map dcr access to existing qemu dcr emulation */
static int kvmppc_handle_dcr_read(CPUPPCState *env, uint32_t dcrn, uint32_t *data)
{
    if (ppc_dcr_read(env->dcr_env, dcrn, data) < 0)
        fprintf(stderr, "Read to unhandled DCR (0x%x)\n", dcrn);

    return 0;
}

static int kvmppc_handle_dcr_write(CPUPPCState *env, uint32_t dcrn, uint32_t data)
{
    if (ppc_dcr_write(env->dcr_env, dcrn, data) < 0)
        fprintf(stderr, "Write to unhandled DCR (0x%x)\n", dcrn);

    return 0;
}

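/* Dispatch PPC-specific KVM exit reasons: DCR accesses, guest halt, PAPR
 * hypercalls and MPIC EPR reads */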
int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    int ret;

    switch (run->exit_reason) {
    case KVM_EXIT_DCR:
        if (run->dcr.is_write) {
            dprintf("handle dcr write\n");
            ret = kvmppc_handle_dcr_write(env, run->dcr.dcrn, run->dcr.data);
        } else {
            dprintf("handle dcr read\n");
            ret = kvmppc_handle_dcr_read(env, run->dcr.dcrn, &run->dcr.data);
        }
        break;
    case KVM_EXIT_HLT:
        dprintf("handle halt\n");
        ret = kvmppc_handle_halt(env);
        break;
#ifdef CONFIG_PSERIES
    case KVM_EXIT_PAPR_HCALL:
        dprintf("handle PAPR hypercall\n");
        run->papr_hcall.ret = spapr_hypercall(cpu,
                                              run->papr_hcall.nr,
                                              run->papr_hcall.args);
        ret = 0;
        break;
#endif
    case KVM_EXIT_EPR:
        dprintf("handle epr\n");
        run->epr.epr = ldl_phys(env->mpic_iack);
        ret = 0;
        break;
    default:
        fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
        ret = -1;
        break;
    }

    return ret;
}

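/* Scan /proc/cpuinfo for a line starting with 'field' and copy that line into
 * 'value'; returns 0 on success, -1 if the field was not found */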
static int read_cpuinfo(const char *field, char *value, int len)
{
    FILE *f;
    int ret = -1;
    int field_len = strlen(field);
    char line[512];

    f = fopen("/proc/cpuinfo", "r");
    if (!f) {
        return -1;
    }

    do {
        if(!fgets(line, sizeof(line), f)) {
            break;
        }
        if (!strncmp(line, field, field_len)) {
            pstrcpy(value, len, line);
            ret = 0;
            break;
        }
    } while(*line);

    fclose(f);

    return ret;
}

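/* Host timebase frequency, parsed from the "timebase" line of /proc/cpuinfo;
 * falls back to the tick rate if the line cannot be found or parsed */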
uint32_t kvmppc_get_tbfreq(void)
{
    char line[512];
    char *ns;
    uint32_t retval = get_ticks_per_sec();

    if (read_cpuinfo("timebase", line, sizeof(line))) {
        return retval;
    }

    if (!(ns = strchr(line, ':'))) {
        return retval;
    }

    ns++;

    retval = atoi(ns);
    return retval;
}

/* Try to find a device tree node for a CPU with clock-frequency property */
static int kvmppc_find_cpu_dt(char *buf, int buf_len)
{
    struct dirent *dirp;
    DIR *dp;

    if ((dp = opendir(PROC_DEVTREE_CPU)) == NULL) {
        printf("Can't open directory " PROC_DEVTREE_CPU "\n");
        return -1;
    }

    buf[0] = '\0';
    while ((dirp = readdir(dp)) != NULL) {
        FILE *f;
        snprintf(buf, buf_len, "%s%s/clock-frequency", PROC_DEVTREE_CPU,
                 dirp->d_name);
        f = fopen(buf, "r");
        if (f) {
            snprintf(buf, buf_len, "%s%s", PROC_DEVTREE_CPU, dirp->d_name);
            fclose(f);
            break;
        }
        buf[0] = '\0';
    }
    closedir(dp);
    if (buf[0] == '\0') {
        printf("Unknown host!\n");
        return -1;
    }

    return 0;
}

/* Read a CPU node property from the host device tree that's a single
 * integer (32-bit or 64-bit).  Returns 0 if anything goes wrong
 * (can't find or open the property, or doesn't understand the
 * format) */
static uint64_t kvmppc_read_int_cpu_dt(const char *propname)
{
    char buf[PATH_MAX];
    union {
        uint32_t v32;
        uint64_t v64;
    } u;
    FILE *f;
    int len;

    if (kvmppc_find_cpu_dt(buf, sizeof(buf))) {
        return -1;
    }

    strncat(buf, "/", sizeof(buf) - strlen(buf));
    strncat(buf, propname, sizeof(buf) - strlen(buf));

    f = fopen(buf, "rb");
    if (!f) {
        return -1;
    }

    len = fread(&u, 1, sizeof(u), f);
    fclose(f);
    switch (len) {
    case 4:
        /* property is a 32-bit quantity */
        return be32_to_cpu(u.v32);
    case 8:
        return be64_to_cpu(u.v64);
    }

    return 0;
}

uint64_t kvmppc_get_clockfreq(void)
{
    return kvmppc_read_int_cpu_dt("clock-frequency");
}

uint32_t kvmppc_get_vmx(void)
{
    return kvmppc_read_int_cpu_dt("ibm,vmx");
}

uint32_t kvmppc_get_dfp(void)
{
    return kvmppc_read_int_cpu_dt("ibm,dfp");
}

static int kvmppc_get_pvinfo(CPUPPCState *env, struct kvm_ppc_pvinfo *pvinfo)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    if (kvm_check_extension(cs->kvm_state, KVM_CAP_PPC_GET_PVINFO) &&
        !kvm_vm_ioctl(cs->kvm_state, KVM_PPC_GET_PVINFO, pvinfo)) {
        return 0;
    }

    return 1;
}

int kvmppc_get_hasidle(CPUPPCState *env)
{
    struct kvm_ppc_pvinfo pvinfo;

    if (!kvmppc_get_pvinfo(env, &pvinfo) &&
        (pvinfo.flags & KVM_PPC_PVINFO_FLAGS_EV_IDLE)) {
        return 1;
    }

    return 0;
}

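/* Fetch the KVM paravirt hypercall instruction sequence for the guest, or
 * fall back to a stub that simply returns -1 (li r3,-1; nop; nop; nop) */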
int kvmppc_get_hypercall(CPUPPCState *env, uint8_t *buf, int buf_len)
1029 45024f09 Alexander Graf
{
1030 45024f09 Alexander Graf
    uint32_t *hc = (uint32_t*)buf;
1031 45024f09 Alexander Graf
    struct kvm_ppc_pvinfo pvinfo;
1032 45024f09 Alexander Graf
1033 1a61a9ae Stuart Yoder
    if (!kvmppc_get_pvinfo(env, &pvinfo)) {
1034 45024f09 Alexander Graf
        memcpy(buf, pvinfo.hcall, buf_len);
1035 45024f09 Alexander Graf
        return 0;
1036 45024f09 Alexander Graf
    }
1037 45024f09 Alexander Graf
1038 45024f09 Alexander Graf
    /*
1039 45024f09 Alexander Graf
     * Fallback to always fail hypercalls:
1040 45024f09 Alexander Graf
     *
1041 45024f09 Alexander Graf
     *     li r3, -1
1042 45024f09 Alexander Graf
     *     nop
1043 45024f09 Alexander Graf
     *     nop
1044 45024f09 Alexander Graf
     *     nop
1045 45024f09 Alexander Graf
     */
1046 45024f09 Alexander Graf
1047 45024f09 Alexander Graf
    hc[0] = 0x3860ffff;
1048 45024f09 Alexander Graf
    hc[1] = 0x60000000;
1049 45024f09 Alexander Graf
    hc[2] = 0x60000000;
1050 45024f09 Alexander Graf
    hc[3] = 0x60000000;
1051 45024f09 Alexander Graf
1052 45024f09 Alexander Graf
    return 0;
1053 45024f09 Alexander Graf
}
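/*
 * Illustrative sketch (assumption, not from this file): callers pass a
 * 16-byte buffer and publish the returned instruction sequence to the
 * guest, e.g. as a "hcall-instructions" device-tree property.  The node
 * and property names are placeholders; the encodings above are
 * 0x3860ffff == "li r3, -1" and 0x60000000 == "nop".
 */
#ifdef KVMPPC_DOC_EXAMPLES
static void example_publish_hcall(void *fdt, CPUPPCState *env)
{
    uint8_t hypercall[16];

    kvmppc_get_hypercall(env, hypercall, sizeof(hypercall));
    qemu_devtree_setprop(fdt, "/hypervisor", "hcall-instructions",
                         hypercall, sizeof(hypercall));
}
#endif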

void kvmppc_set_papr(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    struct kvm_enable_cap cap = {};
    int ret;

    cap.cap = KVM_CAP_PPC_PAPR;
    ret = kvm_vcpu_ioctl(cs, KVM_ENABLE_CAP, &cap);

    if (ret) {
        cpu_abort(env, "This KVM version does not support PAPR\n");
    }
}

void kvmppc_set_mpic_proxy(PowerPCCPU *cpu, int mpic_proxy)
{
    CPUPPCState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    struct kvm_enable_cap cap = {};
    int ret;

    cap.cap = KVM_CAP_PPC_EPR;
    cap.args[0] = mpic_proxy;
    ret = kvm_vcpu_ioctl(cs, KVM_ENABLE_CAP, &cap);

    if (ret && mpic_proxy) {
        cpu_abort(env, "This KVM version does not support EPR\n");
    }
}
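/*
 * Illustrative sketch (not part of the original source): both setters above
 * follow the same per-vcpu KVM_ENABLE_CAP pattern.  A generic helper along
 * these lines could factor it out; the helper name and the guard macro are
 * assumptions made for this example only.
 */
#ifdef KVMPPC_DOC_EXAMPLES
static int example_enable_vcpu_cap(CPUState *cs, uint32_t capability,
                                   uint64_t arg0)
{
    struct kvm_enable_cap cap = {};

    cap.cap = capability;
    cap.args[0] = arg0;
    /* Returns 0 on success, a negative errno value otherwise */
    return kvm_vcpu_ioctl(cs, KVM_ENABLE_CAP, &cap);
}
#endif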

int kvmppc_smt_threads(void)
{
    return cap_ppc_smt ? cap_ppc_smt : 1;
}

#ifdef TARGET_PPC64
off_t kvmppc_alloc_rma(const char *name, MemoryRegion *sysmem)
{
    void *rma;
    off_t size;
    int fd;
    struct kvm_allocate_rma ret;
    MemoryRegion *rma_region;

    /* If cap_ppc_rma == 0, contiguous RMA allocation is not supported
     * if cap_ppc_rma == 1, contiguous RMA allocation is supported, but
     *                      not necessary on this hardware
     * if cap_ppc_rma == 2, contiguous RMA allocation is needed on this hardware
     *
     * FIXME: We should allow the user to force contiguous RMA
     * allocation in the cap_ppc_rma==1 case.
     */
    if (cap_ppc_rma < 2) {
        return 0;
    }

    fd = kvm_vm_ioctl(kvm_state, KVM_ALLOCATE_RMA, &ret);
    if (fd < 0) {
        fprintf(stderr, "KVM: Error on KVM_ALLOCATE_RMA: %s\n",
                strerror(errno));
        return -1;
    }

    size = MIN(ret.rma_size, 256ul << 20);

    rma = mmap(NULL, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
    if (rma == MAP_FAILED) {
        fprintf(stderr, "KVM: Error mapping RMA: %s\n", strerror(errno));
        return -1;
    }

    rma_region = g_new(MemoryRegion, 1);
    memory_region_init_ram_ptr(rma_region, name, size, rma);
    vmstate_register_ram_global(rma_region);
    memory_region_add_subregion(sysmem, 0, rma_region);

    return size;
}
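/*
 * Illustrative sketch (assumption, not from this file): a pSeries machine
 * model would call kvmppc_alloc_rma() early in machine init, abort on -1,
 * and treat a return of 0 as "no dedicated RMA needed, use ordinary guest
 * RAM at address 0".  The region name and helper name are placeholders.
 */
#ifdef KVMPPC_DOC_EXAMPLES
static off_t example_setup_rma(MemoryRegion *sysmem)
{
    off_t rma_alloc_size = kvmppc_alloc_rma("ppc_spapr.rma", sysmem);

    if (rma_alloc_size == -1) {
        fprintf(stderr, "Unable to create RMA\n");
        exit(1);
    }
    /* rma_alloc_size == 0 means the RMA can simply live in normal RAM */
    return rma_alloc_size;
}
#endif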

uint64_t kvmppc_rma_size(uint64_t current_size, unsigned int hash_shift)
{
    if (cap_ppc_rma >= 2) {
        return current_size;
    }
    return MIN(current_size,
               getrampagesize() << (hash_shift - 7));
}
#endif
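/*
 * Worked example for kvmppc_rma_size() (illustrative, with assumed values):
 * with a 16 MiB hash table (hash_shift == 24) and 4 KiB host pages,
 * getrampagesize() << (24 - 7) is 4096 << 17 = 512 MiB, so a larger RMA
 * would be clamped to that.  With cap_ppc_rma >= 2 the kernel-provided
 * contiguous RMA is used and no clamping is applied.
 */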

void *kvmppc_create_spapr_tce(uint32_t liobn, uint32_t window_size, int *pfd)
{
    struct kvm_create_spapr_tce args = {
        .liobn = liobn,
        .window_size = window_size,
    };
    long len;
    int fd;
    void *table;

    /* Must set fd to -1 so we don't try to munmap when called for
     * destroying the table, which the upper layers -will- do
     */
    *pfd = -1;
    if (!cap_spapr_tce) {
        return NULL;
    }

    fd = kvm_vm_ioctl(kvm_state, KVM_CREATE_SPAPR_TCE, &args);
    if (fd < 0) {
        fprintf(stderr, "KVM: Failed to create TCE table for liobn 0x%x\n",
                liobn);
        return NULL;
    }

    len = (window_size / SPAPR_TCE_PAGE_SIZE) * sizeof(sPAPRTCE);
    /* FIXME: round this up to page size */

    table = mmap(NULL, len, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
    if (table == MAP_FAILED) {
        fprintf(stderr, "KVM: Failed to map TCE table for liobn 0x%x\n",
                liobn);
        close(fd);
        return NULL;
    }

    *pfd = fd;
    return table;
}
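/*
 * Worked example for the TCE table size above (illustrative, with assumed
 * values): a 256 MiB DMA window with a 4 KiB SPAPR_TCE_PAGE_SIZE gives
 * 65536 entries; assuming an 8-byte sPAPRTCE entry, the mmap()ed table is
 * 512 KiB.  The real entry and page sizes are whatever spapr.h defines.
 */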

int kvmppc_remove_spapr_tce(void *table, int fd, uint32_t window_size)
{
    long len;

    if (fd < 0) {
        return -1;
    }

    len = (window_size / SPAPR_TCE_PAGE_SIZE) * sizeof(sPAPRTCE);
    if ((munmap(table, len) < 0) ||
        (close(fd) < 0)) {
        fprintf(stderr, "KVM: Unexpected error removing TCE table: %s\n",
                strerror(errno));
        /* Leak the table */
    }

    return 0;
}

int kvmppc_reset_htab(int shift_hint)
{
    uint32_t shift = shift_hint;

    if (!kvm_enabled()) {
        /* Full emulation, tell caller to allocate htab itself */
        return 0;
    }
    if (kvm_check_extension(kvm_state, KVM_CAP_PPC_ALLOC_HTAB)) {
        int ret;
        ret = kvm_vm_ioctl(kvm_state, KVM_PPC_ALLOCATE_HTAB, &shift);
        if (ret == -ENOTTY) {
            /* At least some versions of PR KVM advertise the
             * capability, but don't implement the ioctl().  Oops.
             * Return 0 so that we allocate the htab in qemu, as is
             * correct for PR. */
            return 0;
        } else if (ret < 0) {
            return ret;
        }
        return shift;
    }

    /* We have a kernel that predates the htab reset calls.  For PR
     * KVM, we need to allocate the htab ourselves, for an HV KVM of
     * this era, it has allocated a 16MB fixed size hash table
     * already.  Kernels of this era have the GET_PVINFO capability
     * only on PR, so we use this hack to determine the right
     * answer */
    if (kvm_check_extension(kvm_state, KVM_CAP_PPC_GET_PVINFO)) {
        /* PR - tell caller to allocate htab */
        return 0;
    } else {
        /* HV - assume 16MB kernel allocated htab */
        return 24;
    }
}
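/*
 * Illustrative sketch (assumption, not from this file): the caller treats
 * the return value as "0 = allocate the hash table in QEMU, > 0 = the
 * kernel owns a 2^N byte table, < 0 = error".  A return of 24 therefore
 * means a fixed 16 MiB kernel-allocated HTAB.  Names are placeholders.
 */
#ifdef KVMPPC_DOC_EXAMPLES
static void example_setup_htab(int shift_hint)
{
    int shift = kvmppc_reset_htab(shift_hint);

    if (shift > 0) {
        /* Kernel manages the hash table; just remember its size */
        fprintf(stderr, "HTAB is %ld bytes, managed by the kernel\n",
                1L << shift);
    } else if (shift == 0) {
        /* Allocate and manage the hash table inside QEMU */
    } else {
        fprintf(stderr, "Failed to allocate HTAB: %s\n", strerror(-shift));
    }
}
#endif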

static inline uint32_t mfpvr(void)
{
    uint32_t pvr;

    asm ("mfpvr %0"
         : "=r"(pvr));
    return pvr;
}

static void alter_insns(uint64_t *word, uint64_t flags, bool on)
{
    if (on) {
        *word |= flags;
    } else {
        *word &= ~flags;
    }
}

static void kvmppc_host_cpu_initfn(Object *obj)
{
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(obj);

    assert(kvm_enabled());

    if (pcc->info->pvr != mfpvr()) {
        fprintf(stderr, "Your host CPU is unsupported.\n"
                "Please choose a supported model instead, see -cpu ?.\n");
        exit(1);
    }
}

static void kvmppc_host_cpu_class_init(ObjectClass *oc, void *data)
{
    PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
    uint32_t host_pvr = mfpvr();
    PowerPCCPUClass *pvr_pcc;
    ppc_def_t *spec;
    uint32_t vmx = kvmppc_get_vmx();
    uint32_t dfp = kvmppc_get_dfp();

    spec = g_malloc0(sizeof(*spec));

    pvr_pcc = ppc_cpu_class_by_pvr(host_pvr);
    if (pvr_pcc != NULL) {
        memcpy(spec, pvr_pcc->info, sizeof(*spec));
    }
    pcc->info = spec;
    /* Override the display name for -cpu ? and QMP */
    pcc->info->name = "host";

    /* Now fix up the spec with information we can query from the host */

    if (vmx != -1) {
        /* Only override when we know what the host supports */
        alter_insns(&spec->insns_flags, PPC_ALTIVEC, vmx > 0);
        alter_insns(&spec->insns_flags2, PPC2_VSX, vmx > 1);
    }
    if (dfp != -1) {
        /* Only override when we know what the host supports */
        alter_insns(&spec->insns_flags2, PPC2_DFP, dfp);
    }
}

int kvmppc_fixup_cpu(PowerPCCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    int smt;

    /* Adjust cpu index for SMT */
    smt = kvmppc_smt_threads();
    cs->cpu_index = (cs->cpu_index / smp_threads) * smt
        + (cs->cpu_index % smp_threads);

    return 0;
}
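/*
 * Worked example for the index fix-up above (illustrative, with assumed
 * values): with smp_threads == 2 on the QEMU side and a host reporting
 * kvmppc_smt_threads() == 4, cpu_index 3 (second thread of the second
 * core) becomes (3 / 2) * 4 + (3 % 2) = 5, so sibling threads stay packed
 * within each core-sized stride of vcpu ids that KVM expects.
 */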

bool kvm_arch_stop_on_emulation_error(CPUState *cpu)
{
    return true;
}

int kvm_arch_on_sigbus_vcpu(CPUState *cpu, int code, void *addr)
{
    return 1;
}

int kvm_arch_on_sigbus(int code, void *addr)
{
    return 1;
}

static const TypeInfo kvm_host_cpu_type_info = {
    .name = TYPE_HOST_POWERPC_CPU,
    .parent = TYPE_POWERPC_CPU,
    .instance_init = kvmppc_host_cpu_initfn,
    .class_init = kvmppc_host_cpu_class_init,
};

static void kvm_ppc_register_types(void)
{
    type_register_static(&kvm_host_cpu_type_info);
}

type_init(kvm_ppc_register_types)