Statistics
| Branch: | Revision:

root / target-ppc / kvm.c @ 7c43bca0

History | View | Annotate | Download (52.9 kB)

1 d76d1650 aurel32
/*
2 d76d1650 aurel32
 * PowerPC implementation of KVM hooks
3 d76d1650 aurel32
 *
4 d76d1650 aurel32
 * Copyright IBM Corp. 2007
5 90dc8812 Scott Wood
 * Copyright (C) 2011 Freescale Semiconductor, Inc.
6 d76d1650 aurel32
 *
7 d76d1650 aurel32
 * Authors:
8 d76d1650 aurel32
 *  Jerone Young <jyoung5@us.ibm.com>
9 d76d1650 aurel32
 *  Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
10 d76d1650 aurel32
 *  Hollis Blanchard <hollisb@us.ibm.com>
11 d76d1650 aurel32
 *
12 d76d1650 aurel32
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
13 d76d1650 aurel32
 * See the COPYING file in the top-level directory.
14 d76d1650 aurel32
 *
15 d76d1650 aurel32
 */
16 d76d1650 aurel32
17 eadaada1 Alexander Graf
#include <dirent.h>
18 d76d1650 aurel32
#include <sys/types.h>
19 d76d1650 aurel32
#include <sys/ioctl.h>
20 d76d1650 aurel32
#include <sys/mman.h>
21 4656e1f0 Benjamin Herrenschmidt
#include <sys/vfs.h>
22 d76d1650 aurel32
23 d76d1650 aurel32
#include <linux/kvm.h>
24 d76d1650 aurel32
25 d76d1650 aurel32
#include "qemu-common.h"
26 1de7afc9 Paolo Bonzini
#include "qemu/timer.h"
27 9c17d615 Paolo Bonzini
#include "sysemu/sysemu.h"
28 9c17d615 Paolo Bonzini
#include "sysemu/kvm.h"
29 d76d1650 aurel32
#include "kvm_ppc.h"
30 d76d1650 aurel32
#include "cpu.h"
31 9c17d615 Paolo Bonzini
#include "sysemu/cpus.h"
32 9c17d615 Paolo Bonzini
#include "sysemu/device_tree.h"
33 d5aea6f3 David Gibson
#include "mmu-hash64.h"
34 d76d1650 aurel32
35 f61b4bed Alexander Graf
#include "hw/sysbus.h"
36 0d09e41a Paolo Bonzini
#include "hw/ppc/spapr.h"
37 0d09e41a Paolo Bonzini
#include "hw/ppc/spapr_vio.h"
38 31f2cb8f Bharat Bhushan
#include "sysemu/watchdog.h"
39 b36f100e Alexey Kardashevskiy
#include "trace.h"
40 f61b4bed Alexander Graf
41 d76d1650 aurel32
/* Debug tracing for this file: uncomment the define below to compile
 * DPRINTF() into stderr output; otherwise it expands to a no-op. */
//#define DEBUG_KVM

#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif
50 d76d1650 aurel32
51 eadaada1 Alexander Graf
#define PROC_DEVTREE_CPU      "/proc/device-tree/cpus/"
52 eadaada1 Alexander Graf
53 94a8d39a Jan Kiszka
/* Capabilities this architecture strictly requires from the kernel.
 * Empty for PPC: the list is just the terminator, so any KVM works and
 * optional features are probed individually in kvm_arch_init(). */
const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_LAST_INFO
};
56 94a8d39a Jan Kiszka
57 fc87e185 Alexander Graf
/* Cached results of KVM capability probes. All but cap_papr are filled
 * in once by kvm_arch_init(); a non-zero value means the running kernel
 * supports the corresponding KVM_CAP_* feature. */
static int cap_interrupt_unset = false;
static int cap_interrupt_level = false;
static int cap_segstate;
static int cap_booke_sregs;
static int cap_ppc_smt;
static int cap_ppc_rma;
static int cap_spapr_tce;
static int cap_hior;
static int cap_one_reg;
static int cap_epr;
static int cap_ppc_watchdog;
/* Not probed at init time: only set once kvmppc_set_papr() has run. */
static int cap_papr;
static int cap_htab_fd;
70 fc87e185 Alexander Graf
71 c821c2bd Alexander Graf
/* XXX We have a race condition where we actually have a level triggered
 *     interrupt, but the infrastructure can't expose that yet, so the guest
 *     takes but ignores it, goes to sleep and never gets notified that there's
 *     still an interrupt pending.
 *
 *     As a quick workaround, let's just wake up again 20 ms after we injected
 *     an interrupt. That way we can assure that we're always reinjecting
 *     interrupts in case the guest swallowed them.
 */
/* Created in kvm_arch_init_vcpu(); its callback is kvm_kick_cpu(). */
static QEMUTimer *idle_timer;
81 c6a94ba5 Alexander Graf
82 d5a68146 Andreas Färber
static void kvm_kick_cpu(void *opaque)
83 c6a94ba5 Alexander Graf
{
84 d5a68146 Andreas Färber
    PowerPCCPU *cpu = opaque;
85 d5a68146 Andreas Färber
86 c08d7424 Andreas Färber
    qemu_cpu_kick(CPU(cpu));
87 c6a94ba5 Alexander Graf
}
88 c6a94ba5 Alexander Graf
89 5ba4576b Andreas Färber
static int kvm_ppc_register_host_cpu_type(void);
90 5ba4576b Andreas Färber
91 cad1e282 Jan Kiszka
/* One-time architecture setup: probe the kernel's optional PPC KVM
 * capabilities into the cap_* globals and register the host CPU type.
 * Always succeeds (returns 0). */
int kvm_arch_init(KVMState *s)
{
    cap_interrupt_unset = kvm_check_extension(s, KVM_CAP_PPC_UNSET_IRQ);
    cap_interrupt_level = kvm_check_extension(s, KVM_CAP_PPC_IRQ_LEVEL);
    cap_segstate = kvm_check_extension(s, KVM_CAP_PPC_SEGSTATE);
    cap_booke_sregs = kvm_check_extension(s, KVM_CAP_PPC_BOOKE_SREGS);
    cap_ppc_smt = kvm_check_extension(s, KVM_CAP_PPC_SMT);
    cap_ppc_rma = kvm_check_extension(s, KVM_CAP_PPC_RMA);
    cap_spapr_tce = kvm_check_extension(s, KVM_CAP_SPAPR_TCE);
    cap_one_reg = kvm_check_extension(s, KVM_CAP_ONE_REG);
    cap_hior = kvm_check_extension(s, KVM_CAP_PPC_HIOR);
    cap_epr = kvm_check_extension(s, KVM_CAP_PPC_EPR);
    cap_ppc_watchdog = kvm_check_extension(s, KVM_CAP_PPC_BOOKE_WATCHDOG);
    /* cap_papr is deliberately not probed here: it only becomes active
     * after kvmppc_set_papr() has been called. */
    cap_htab_fd = kvm_check_extension(s, KVM_CAP_PPC_HTAB_FD);

    if (!cap_interrupt_level) {
        fprintf(stderr, "KVM: Couldn't find level irq capability. Expect the "
                        "VM to stall at times!\n");
    }

    kvm_ppc_register_host_cpu_type();

    return 0;
}
117 d76d1650 aurel32
118 1bc22652 Andreas Färber
/* Push the guest PVR into KVM's sregs so the kernel models the right CPU.
 * Returns 0 on success, or a negative errno from the ioctls / -ENOSYS if
 * the kernel cannot set a PVR at all. */
static int kvm_arch_sync_sregs(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    struct kvm_sregs sregs;
    int ret;

    /* On BookE we simply keep the native PVR: there is no clean way to
     * probe for this, so a BookE guest on a BookS host will be silently
     * mis-detected here. Let's hope nobody dares enough :) */
    if (env->excp_model == POWERPC_EXCP_BOOKE) {
        return 0;
    }

    if (!cap_segstate) {
        fprintf(stderr, "kvm error: missing PVR setting capability\n");
        return -ENOSYS;
    }

    /* Read-modify-write: only the PVR field is updated. */
    ret = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs);
    if (ret) {
        return ret;
    }

    sregs.pvr = env->spr[SPR_PVR];
    return kvm_vcpu_ioctl(cs, KVM_SET_SREGS, &sregs);
}
146 5666ca4a Scott Wood
147 93dd5e85 Scott Wood
/* Set up a shared TLB array with KVM */
148 1bc22652 Andreas Färber
static int kvm_booke206_tlb_init(PowerPCCPU *cpu)
149 93dd5e85 Scott Wood
{
150 1bc22652 Andreas Färber
    CPUPPCState *env = &cpu->env;
151 1bc22652 Andreas Färber
    CPUState *cs = CPU(cpu);
152 93dd5e85 Scott Wood
    struct kvm_book3e_206_tlb_params params = {};
153 93dd5e85 Scott Wood
    struct kvm_config_tlb cfg = {};
154 93dd5e85 Scott Wood
    struct kvm_enable_cap encap = {};
155 93dd5e85 Scott Wood
    unsigned int entries = 0;
156 93dd5e85 Scott Wood
    int ret, i;
157 93dd5e85 Scott Wood
158 93dd5e85 Scott Wood
    if (!kvm_enabled() ||
159 a60f24b5 Andreas Färber
        !kvm_check_extension(cs->kvm_state, KVM_CAP_SW_TLB)) {
160 93dd5e85 Scott Wood
        return 0;
161 93dd5e85 Scott Wood
    }
162 93dd5e85 Scott Wood
163 93dd5e85 Scott Wood
    assert(ARRAY_SIZE(params.tlb_sizes) == BOOKE206_MAX_TLBN);
164 93dd5e85 Scott Wood
165 93dd5e85 Scott Wood
    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
166 93dd5e85 Scott Wood
        params.tlb_sizes[i] = booke206_tlb_size(env, i);
167 93dd5e85 Scott Wood
        params.tlb_ways[i] = booke206_tlb_ways(env, i);
168 93dd5e85 Scott Wood
        entries += params.tlb_sizes[i];
169 93dd5e85 Scott Wood
    }
170 93dd5e85 Scott Wood
171 93dd5e85 Scott Wood
    assert(entries == env->nb_tlb);
172 93dd5e85 Scott Wood
    assert(sizeof(struct kvm_book3e_206_tlb_entry) == sizeof(ppcmas_tlb_t));
173 93dd5e85 Scott Wood
174 93dd5e85 Scott Wood
    env->tlb_dirty = true;
175 93dd5e85 Scott Wood
176 93dd5e85 Scott Wood
    cfg.array = (uintptr_t)env->tlb.tlbm;
177 93dd5e85 Scott Wood
    cfg.array_len = sizeof(ppcmas_tlb_t) * entries;
178 93dd5e85 Scott Wood
    cfg.params = (uintptr_t)&params;
179 93dd5e85 Scott Wood
    cfg.mmu_type = KVM_MMU_FSL_BOOKE_NOHV;
180 93dd5e85 Scott Wood
181 93dd5e85 Scott Wood
    encap.cap = KVM_CAP_SW_TLB;
182 93dd5e85 Scott Wood
    encap.args[0] = (uintptr_t)&cfg;
183 93dd5e85 Scott Wood
184 1bc22652 Andreas Färber
    ret = kvm_vcpu_ioctl(cs, KVM_ENABLE_CAP, &encap);
185 93dd5e85 Scott Wood
    if (ret < 0) {
186 93dd5e85 Scott Wood
        fprintf(stderr, "%s: couldn't enable KVM_CAP_SW_TLB: %s\n",
187 93dd5e85 Scott Wood
                __func__, strerror(-ret));
188 93dd5e85 Scott Wood
        return ret;
189 93dd5e85 Scott Wood
    }
190 93dd5e85 Scott Wood
191 93dd5e85 Scott Wood
    env->kvm_sw_tlb = true;
192 93dd5e85 Scott Wood
    return 0;
193 93dd5e85 Scott Wood
}
194 93dd5e85 Scott Wood
195 4656e1f0 Benjamin Herrenschmidt
196 4656e1f0 Benjamin Herrenschmidt
#if defined(TARGET_PPC64)
197 a60f24b5 Andreas Färber
/* Fill *info with guessed MMU characteristics for kernels that predate
 * the KVM_PPC_GET_SMMU_INFO ioctl.
 *
 * Assumptions behind the guess:
 *
 * - KVM_CAP_PPC_GET_PVINFO present => "PR" KVM, which supports exactly
 *   4K and 16M pages regardless of the backing store, and no 1T
 *   segments. Safe, because any PR/HV KVM that grows more page or
 *   segment sizes will also implement KVM_PPC_GET_SMMU_INFO and never
 *   reach this fallback.
 *
 * - Otherwise => HV KVM: only page sizes that fit in the backing store,
 *   64K pages only on an ARCH 2.06 processor, and P7 encodings for the
 *   SLB and hash table. Here too, any newer processor implies a kernel
 *   with the real ioctl, bypassing this code.
 */
static void kvm_get_fallback_smmu_info(PowerPCCPU *cpu,
                                       struct kvm_ppc_smmu_info *info)
{
    CPUPPCState *env = &cpu->env;
    CPUState *cs = CPU(cpu);

    memset(info, 0, sizeof(*info));

    if (kvm_check_extension(cs->kvm_state, KVM_CAP_PPC_GET_PVINFO)) {
        /* PR KVM: no flags, fixed page-size menu */
        info->flags = 0;
        info->slb_size = 64;

        /* Standard 4k base page size segment */
        info->sps[0].page_shift = 12;
        info->sps[0].slb_enc = 0;
        info->sps[0].enc[0].page_shift = 12;
        info->sps[0].enc[0].pte_enc = 0;

        /* Standard 16M large page size segment */
        info->sps[1].page_shift = 24;
        info->sps[1].slb_enc = SLB_VSID_L;
        info->sps[1].enc[0].page_shift = 24;
        info->sps[1].enc[0].pte_enc = 0;
    } else {
        int slot = 0;

        /* HV KVM has backing store size restrictions */
        info->flags = KVM_PPC_PAGE_SIZES_REAL;

        if (env->mmu_model & POWERPC_MMU_1TSEG) {
            info->flags |= KVM_PPC_1T_SEGMENTS;
        }

        info->slb_size = (env->mmu_model == POWERPC_MMU_2_06) ? 32 : 64;

        /* Standard 4k base page size segment */
        info->sps[slot].page_shift = 12;
        info->sps[slot].slb_enc = 0;
        info->sps[slot].enc[0].page_shift = 12;
        info->sps[slot].enc[0].pte_enc = 0;
        slot++;

        /* 64K pages, only on MMU 2.06 */
        if (env->mmu_model == POWERPC_MMU_2_06) {
            info->sps[slot].page_shift = 16;
            info->sps[slot].slb_enc = 0x110;
            info->sps[slot].enc[0].page_shift = 16;
            info->sps[slot].enc[0].pte_enc = 1;
            slot++;
        }

        /* Standard 16M large page size segment */
        info->sps[slot].page_shift = 24;
        info->sps[slot].slb_enc = SLB_VSID_L;
        info->sps[slot].enc[0].page_shift = 24;
        info->sps[slot].enc[0].pte_enc = 0;
    }
}
283 4656e1f0 Benjamin Herrenschmidt
284 a60f24b5 Andreas Färber
/* Fill *info with the host MMU description: ask the kernel via
 * KVM_PPC_GET_SMMU_INFO when available, otherwise fall back to the
 * guessed description from kvm_get_fallback_smmu_info(). */
static void kvm_get_smmu_info(PowerPCCPU *cpu, struct kvm_ppc_smmu_info *info)
{
    CPUState *cs = CPU(cpu);

    if (kvm_check_extension(cs->kvm_state, KVM_CAP_PPC_GET_SMMU_INFO) &&
        kvm_vm_ioctl(cs->kvm_state, KVM_PPC_GET_SMMU_INFO, info) == 0) {
        return;
    }

    kvm_get_fallback_smmu_info(cpu, info);
}
298 4656e1f0 Benjamin Herrenschmidt
299 4656e1f0 Benjamin Herrenschmidt
static long getrampagesize(void)
300 4656e1f0 Benjamin Herrenschmidt
{
301 4656e1f0 Benjamin Herrenschmidt
    struct statfs fs;
302 4656e1f0 Benjamin Herrenschmidt
    int ret;
303 4656e1f0 Benjamin Herrenschmidt
304 4656e1f0 Benjamin Herrenschmidt
    if (!mem_path) {
305 4656e1f0 Benjamin Herrenschmidt
        /* guest RAM is backed by normal anonymous pages */
306 4656e1f0 Benjamin Herrenschmidt
        return getpagesize();
307 4656e1f0 Benjamin Herrenschmidt
    }
308 4656e1f0 Benjamin Herrenschmidt
309 4656e1f0 Benjamin Herrenschmidt
    do {
310 4656e1f0 Benjamin Herrenschmidt
        ret = statfs(mem_path, &fs);
311 4656e1f0 Benjamin Herrenschmidt
    } while (ret != 0 && errno == EINTR);
312 4656e1f0 Benjamin Herrenschmidt
313 4656e1f0 Benjamin Herrenschmidt
    if (ret != 0) {
314 4656e1f0 Benjamin Herrenschmidt
        fprintf(stderr, "Couldn't statfs() memory path: %s\n",
315 4656e1f0 Benjamin Herrenschmidt
                strerror(errno));
316 4656e1f0 Benjamin Herrenschmidt
        exit(1);
317 4656e1f0 Benjamin Herrenschmidt
    }
318 4656e1f0 Benjamin Herrenschmidt
319 4656e1f0 Benjamin Herrenschmidt
#define HUGETLBFS_MAGIC       0x958458f6
320 4656e1f0 Benjamin Herrenschmidt
321 4656e1f0 Benjamin Herrenschmidt
    if (fs.f_type != HUGETLBFS_MAGIC) {
322 4656e1f0 Benjamin Herrenschmidt
        /* Explicit mempath, but it's ordinary pages */
323 4656e1f0 Benjamin Herrenschmidt
        return getpagesize();
324 4656e1f0 Benjamin Herrenschmidt
    }
325 4656e1f0 Benjamin Herrenschmidt
326 4656e1f0 Benjamin Herrenschmidt
    /* It's hugepage, return the huge page size */
327 4656e1f0 Benjamin Herrenschmidt
    return fs.f_bsize;
328 4656e1f0 Benjamin Herrenschmidt
}
329 4656e1f0 Benjamin Herrenschmidt
330 4656e1f0 Benjamin Herrenschmidt
static bool kvm_valid_page_size(uint32_t flags, long rampgsize, uint32_t shift)
331 4656e1f0 Benjamin Herrenschmidt
{
332 4656e1f0 Benjamin Herrenschmidt
    if (!(flags & KVM_PPC_PAGE_SIZES_REAL)) {
333 4656e1f0 Benjamin Herrenschmidt
        return true;
334 4656e1f0 Benjamin Herrenschmidt
    }
335 4656e1f0 Benjamin Herrenschmidt
336 4656e1f0 Benjamin Herrenschmidt
    return (1ul << shift) <= rampgsize;
337 4656e1f0 Benjamin Herrenschmidt
}
338 4656e1f0 Benjamin Herrenschmidt
339 a60f24b5 Andreas Färber
/* Rebuild env->sps (and slb_nr / 1T-segment flag) from the kernel's MMU
 * description, dropping every page size the RAM backing store cannot
 * actually provide. 64-bit server MMUs only; a no-op otherwise. */
static void kvm_fixup_page_sizes(PowerPCCPU *cpu)
{
    static struct kvm_ppc_smmu_info smmu_info;
    static bool has_smmu_info;
    CPUPPCState *env = &cpu->env;
    long rampagesize;
    int qi, ki, qj, kj;

    /* We only handle page sizes for 64-bit server guests for now */
    if (!(env->mmu_model & POWERPC_MMU_64)) {
        return;
    }

    /* Query the kernel once; the answer is cached across vcpus. */
    if (!has_smmu_info) {
        kvm_get_smmu_info(cpu, &smmu_info);
        has_smmu_info = true;
    }

    rampagesize = getrampagesize();

    /* Convert the KVM layout to QEMU form, filtering as we go:
     * ki/kj index the kernel arrays, qi/qj the (possibly shorter)
     * QEMU-side arrays. */
    memset(&env->sps, 0, sizeof(env->sps));

    for (ki = qi = 0; ki < KVM_PPC_PAGE_SIZES_MAX_SZ; ki++) {
        struct kvm_ppc_one_seg_page_size *ksps = &smmu_info.sps[ki];
        struct ppc_one_seg_page_size *qsps = &env->sps.sps[qi];

        if (!kvm_valid_page_size(smmu_info.flags, rampagesize,
                                 ksps->page_shift)) {
            continue;
        }
        qsps->page_shift = ksps->page_shift;
        qsps->slb_enc = ksps->slb_enc;
        for (kj = qj = 0; kj < KVM_PPC_PAGE_SIZES_MAX_SZ; kj++) {
            if (!kvm_valid_page_size(smmu_info.flags, rampagesize,
                                     ksps->enc[kj].page_shift)) {
                continue;
            }
            qsps->enc[qj].page_shift = ksps->enc[kj].page_shift;
            qsps->enc[qj].pte_enc = ksps->enc[kj].pte_enc;
            if (++qj >= PPC_PAGE_SIZES_MAX_SZ) {
                break;
            }
        }
        if (++qi >= PPC_PAGE_SIZES_MAX_SZ) {
            break;
        }
    }

    env->slb_nr = smmu_info.slb_size;
    if (smmu_info.flags & KVM_PPC_1T_SEGMENTS) {
        env->mmu_model |= POWERPC_MMU_1TSEG;
    } else {
        env->mmu_model &= ~POWERPC_MMU_1TSEG;
    }
}
395 4656e1f0 Benjamin Herrenschmidt
#else /* defined (TARGET_PPC64) */

/* Page-size fixup only applies to 64-bit server MMUs; no-op elsewhere. */
static inline void kvm_fixup_page_sizes(PowerPCCPU *cpu)
{
}

#endif /* !defined (TARGET_PPC64) */
402 4656e1f0 Benjamin Herrenschmidt
403 b164e48e Eduardo Habkost
/* Vcpu id handed to KVM for this CPU: PPC uses the plain QEMU cpu index. */
unsigned long kvm_arch_vcpu_id(CPUState *cpu)
{
    return cpu->cpu_index;
}
407 b164e48e Eduardo Habkost
408 20d695a9 Andreas Färber
/* Per-vcpu KVM initialization: fix up page sizes from the host MMU,
 * sync sregs (PVR), arm the interrupt-reinjection timer and, on BookE
 * 2.06, share the TLB array with the kernel.
 * Returns 0 on success, negative errno on failure. */
int kvm_arch_init_vcpu(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    int ret;

    /* Gather server mmu info from KVM and update the CPU state */
    kvm_fixup_page_sizes(cpu);

    /* Synchronize sregs with kvm */
    ret = kvm_arch_sync_sregs(cpu);
    if (ret) {
        return ret;
    }

    idle_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, kvm_kick_cpu, cpu);

    /* Some targets support access to KVM's guest TLB. */
    if (env->mmu_model == POWERPC_MMU_BOOKE206) {
        ret = kvm_booke206_tlb_init(cpu);
    }

    return ret;
}
436 d76d1650 aurel32
437 20d695a9 Andreas Färber
/* Nothing architecture-specific to do on vcpu reset for PPC. */
void kvm_arch_reset_vcpu(CPUState *cpu)
{
}
440 caa5af0f Jan Kiszka
441 1bc22652 Andreas Färber
/* Flush the software-managed TLB back to KVM by marking every entry
 * dirty via KVM_DIRTY_TLB. No-op unless the shared TLB was set up
 * (env->kvm_sw_tlb). Failures are reported on stderr but not fatal. */
static void kvm_sw_tlb_put(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    struct kvm_dirty_tlb dirty_tlb;
    unsigned char *bitmap;
    size_t bitmap_len;
    int ret;

    if (!env->kvm_sw_tlb) {
        return;
    }

    /* One bit per TLB entry, all set: KVM rereads the whole array. */
    bitmap_len = (env->nb_tlb + 7) / 8;
    bitmap = g_malloc(bitmap_len);
    memset(bitmap, 0xFF, bitmap_len);

    dirty_tlb.bitmap = (uintptr_t)bitmap;
    dirty_tlb.num_dirty = env->nb_tlb;

    ret = kvm_vcpu_ioctl(cs, KVM_DIRTY_TLB, &dirty_tlb);
    if (ret) {
        fprintf(stderr, "%s: KVM_DIRTY_TLB: %s\n",
                __func__, strerror(-ret));
    }

    g_free(bitmap);
}
467 93dd5e85 Scott Wood
468 d67d40ea David Gibson
/* Read one SPR from KVM via the ONE_REG interface into env->spr[spr].
 * @id encodes both the register and its size (KVM_REG_SIZE_MASK);
 * only 32-bit and 64-bit registers are supported. Failures are traced
 * and the env value is left untouched. */
static void kvm_get_one_spr(CPUState *cs, uint64_t id, int spr)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    union {
        uint32_t u32;
        uint64_t u64;
    } val;
    struct kvm_one_reg reg = {
        .id = id,
        .addr = (uintptr_t) &val,
    };
    int ret;

    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret != 0) {
        trace_kvm_failed_spr_get(spr, strerror(errno));
        return;
    }

    switch (id & KVM_REG_SIZE_MASK) {
    case KVM_REG_SIZE_U32:
        env->spr[spr] = val.u32;
        break;

    case KVM_REG_SIZE_U64:
        env->spr[spr] = val.u64;
        break;

    default:
        /* Don't handle this size yet */
        abort();
    }
}
501 d67d40ea David Gibson
502 d67d40ea David Gibson
/* Write env->spr[spr] to KVM via the ONE_REG interface.
 * @id encodes both the register and its size (KVM_REG_SIZE_MASK);
 * only 32-bit and 64-bit registers are supported. Failures are traced. */
static void kvm_put_one_spr(CPUState *cs, uint64_t id, int spr)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    union {
        uint32_t u32;
        uint64_t u64;
    } val;
    struct kvm_one_reg reg = {
        .id = id,
        .addr = (uintptr_t) &val,
    };
    uint64_t size = id & KVM_REG_SIZE_MASK;
    int ret;

    if (size == KVM_REG_SIZE_U32) {
        val.u32 = env->spr[spr];
    } else if (size == KVM_REG_SIZE_U64) {
        val.u64 = env->spr[spr];
    } else {
        /* Don't handle this size yet */
        abort();
    }

    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret != 0) {
        trace_kvm_failed_spr_set(spr, strerror(errno));
    }
}
535 d67d40ea David Gibson
536 70b79849 David Gibson
/* Push QEMU's floating point / VSX and Altivec state out to KVM.
 * Returns 0 on success or the (negative) ioctl result on failure. */
static int kvm_put_fp(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    struct kvm_one_reg reg;
    int ret;
    int i;

    if (env->insns_flags & PPC_FLOAT) {
        bool vsx = !!(env->insns_flags2 & PPC2_VSX);
        uint64_t fpscr = env->fpscr;

        reg.addr = (uintptr_t)&fpscr;
        reg.id = KVM_REG_PPC_FPSCR;
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret < 0) {
            DPRINTF("Unable to set FPSCR to KVM: %s\n", strerror(errno));
            return ret;
        }

        for (i = 0; i < 32; i++) {
            /* First doubleword carries the FPR value, the second the
             * VSX extension of the register pair. */
            uint64_t vsr[2] = { float64_val(env->fpr[i]), env->vsr[i] };

            reg.id = vsx ? KVM_REG_PPC_VSR(i) : KVM_REG_PPC_FPR(i);
            reg.addr = (uintptr_t) &vsr;
            ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
            if (ret < 0) {
                DPRINTF("Unable to set %s%d to KVM: %s\n", vsx ? "VSR" : "FPR",
                        i, strerror(errno));
                return ret;
            }
        }
    }

    if (env->insns_flags & PPC_ALTIVEC) {
        reg.addr = (uintptr_t)&env->vscr;
        reg.id = KVM_REG_PPC_VSCR;
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret < 0) {
            DPRINTF("Unable to set VSCR to KVM: %s\n", strerror(errno));
            return ret;
        }

        for (i = 0; i < 32; i++) {
            reg.addr = (uintptr_t)&env->avr[i];
            reg.id = KVM_REG_PPC_VR(i);
            ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
            if (ret < 0) {
                DPRINTF("Unable to set VR%d to KVM: %s\n", i, strerror(errno));
                return ret;
            }
        }
    }

    return 0;
}
595 70b79849 David Gibson
596 70b79849 David Gibson
/* Pull floating point / VSX and Altivec state from KVM back into env.
 * Returns 0 on success or the (negative) ioctl result on failure. */
static int kvm_get_fp(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    struct kvm_one_reg reg;
    int ret;
    int i;

    if (env->insns_flags & PPC_FLOAT) {
        bool vsx = !!(env->insns_flags2 & PPC2_VSX);
        uint64_t fpscr;

        reg.addr = (uintptr_t)&fpscr;
        reg.id = KVM_REG_PPC_FPSCR;
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret < 0) {
            DPRINTF("Unable to get FPSCR from KVM: %s\n", strerror(errno));
            return ret;
        }
        env->fpscr = fpscr;

        for (i = 0; i < 32; i++) {
            uint64_t vsr[2];

            reg.id = vsx ? KVM_REG_PPC_VSR(i) : KVM_REG_PPC_FPR(i);
            reg.addr = (uintptr_t) &vsr;
            ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
            if (ret < 0) {
                DPRINTF("Unable to get %s%d from KVM: %s\n",
                        vsx ? "VSR" : "FPR", i, strerror(errno));
                return ret;
            }
            /* First doubleword is the FPR; the second only exists for
             * VSX-capable CPUs. */
            env->fpr[i] = vsr[0];
            if (vsx) {
                env->vsr[i] = vsr[1];
            }
        }
    }

    if (env->insns_flags & PPC_ALTIVEC) {
        reg.addr = (uintptr_t)&env->vscr;
        reg.id = KVM_REG_PPC_VSCR;
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret < 0) {
            DPRINTF("Unable to get VSCR from KVM: %s\n", strerror(errno));
            return ret;
        }

        for (i = 0; i < 32; i++) {
            reg.addr = (uintptr_t)&env->avr[i];
            reg.id = KVM_REG_PPC_VR(i);
            ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
            if (ret < 0) {
                DPRINTF("Unable to get VR%d from KVM: %s\n",
                        i, strerror(errno));
                return ret;
            }
        }
    }

    return 0;
}
661 70b79849 David Gibson
662 9b00ea49 David Gibson
#if defined(TARGET_PPC64)
663 9b00ea49 David Gibson
/* Read the PAPR Virtual Processor Area registration state (master VPA,
 * SLB shadow buffer, dispatch trace log) from KVM back into env.
 * Returns 0 on success or the (negative) ioctl result on failure. */
static int kvm_get_vpa(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    struct kvm_one_reg reg;
    int err;

    reg.addr = (uintptr_t)&env->vpa_addr;
    reg.id = KVM_REG_PPC_VPA_ADDR;
    err = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (err < 0) {
        DPRINTF("Unable to get VPA address from KVM: %s\n", strerror(errno));
        return err;
    }

    /* The addr/size pair is presumably transferred as one unit — the
     * size field must sit directly after the address field. */
    assert((uintptr_t)&env->slb_shadow_size
           == ((uintptr_t)&env->slb_shadow_addr + 8));
    reg.addr = (uintptr_t)&env->slb_shadow_addr;
    reg.id = KVM_REG_PPC_VPA_SLB;
    err = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (err < 0) {
        DPRINTF("Unable to get SLB shadow state from KVM: %s\n",
                strerror(errno));
        return err;
    }

    assert((uintptr_t)&env->dtl_size == ((uintptr_t)&env->dtl_addr + 8));
    reg.addr = (uintptr_t)&env->dtl_addr;
    reg.id = KVM_REG_PPC_VPA_DTL;
    err = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (err < 0) {
        DPRINTF("Unable to get dispatch trace log state from KVM: %s\n",
                strerror(errno));
        return err;
    }

    return 0;
}
701 9b00ea49 David Gibson
702 9b00ea49 David Gibson
/* Push the PAPR Virtual Processor Area registration state (master VPA,
 * SLB shadow buffer, dispatch trace log) from env into KVM.  The order
 * of the ioctls below is significant — see the comment on the assert.
 * Returns 0 on success or the (negative) ioctl result on failure. */
static int kvm_put_vpa(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    struct kvm_one_reg reg;
    int ret;

    /* SLB shadow or DTL can't be registered unless a master VPA is
     * registered.  That means when restoring state, if a VPA *is*
     * registered, we need to set that up first.  If not, we need to
     * deregister the others before deregistering the master VPA */
    assert(env->vpa_addr || !(env->slb_shadow_addr || env->dtl_addr));

    if (env->vpa_addr) {
        /* Register the master VPA first so the SLB shadow / DTL
         * registrations below can be accepted. */
        reg.id = KVM_REG_PPC_VPA_ADDR;
        reg.addr = (uintptr_t)&env->vpa_addr;
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret < 0) {
            DPRINTF("Unable to set VPA address to KVM: %s\n", strerror(errno));
            return ret;
        }
    }

    /* The addr/size pair is presumably transferred as one unit — the
     * size field must sit directly after the address field. */
    assert((uintptr_t)&env->slb_shadow_size
           == ((uintptr_t)&env->slb_shadow_addr + 8));
    reg.id = KVM_REG_PPC_VPA_SLB;
    reg.addr = (uintptr_t)&env->slb_shadow_addr;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret < 0) {
        DPRINTF("Unable to set SLB shadow state to KVM: %s\n", strerror(errno));
        return ret;
    }

    assert((uintptr_t)&env->dtl_size == ((uintptr_t)&env->dtl_addr + 8));
    reg.id = KVM_REG_PPC_VPA_DTL;
    reg.addr = (uintptr_t)&env->dtl_addr;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret < 0) {
        DPRINTF("Unable to set dispatch trace log state to KVM: %s\n",
                strerror(errno));
        return ret;
    }

    if (!env->vpa_addr) {
        /* Deregister the master VPA last, after the dependent buffers
         * above have been torn down. */
        reg.id = KVM_REG_PPC_VPA_ADDR;
        reg.addr = (uintptr_t)&env->vpa_addr;
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret < 0) {
            DPRINTF("Unable to set VPA address to KVM: %s\n", strerror(errno));
            return ret;
        }
    }

    return 0;
}
757 9b00ea49 David Gibson
#endif /* TARGET_PPC64 */
758 9b00ea49 David Gibson
759 20d695a9 Andreas Färber
/*
 * Copy QEMU's CPU state out to KVM.  The general registers and FP state
 * are always pushed; the segment/BAT/SLB state and HIOR only from
 * 'level' >= KVM_PUT_RESET_STATE, and only when the corresponding
 * capability was detected.  Returns 0 on success, negative on error.
 */
int kvm_arch_put_registers(CPUState *cs, int level)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    struct kvm_regs regs;
    int ret;
    int i;

    /* Read-modify-write: fetch the current values first so any fields
     * of kvm_regs not written below keep what the kernel already has. */
    ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
    if (ret < 0) {
        return ret;
    }

    regs.ctr = env->ctr;
    regs.lr  = env->lr;
    regs.xer = cpu_read_xer(env);
    regs.msr = env->msr;
    regs.pc = env->nip;

    regs.srr0 = env->spr[SPR_SRR0];
    regs.srr1 = env->spr[SPR_SRR1];

    regs.sprg0 = env->spr[SPR_SPRG0];
    regs.sprg1 = env->spr[SPR_SPRG1];
    regs.sprg2 = env->spr[SPR_SPRG2];
    regs.sprg3 = env->spr[SPR_SPRG3];
    regs.sprg4 = env->spr[SPR_SPRG4];
    regs.sprg5 = env->spr[SPR_SPRG5];
    regs.sprg6 = env->spr[SPR_SPRG6];
    regs.sprg7 = env->spr[SPR_SPRG7];

    regs.pid = env->spr[SPR_BOOKE_PID];

    for (i = 0;i < 32; i++)
        regs.gpr[i] = env->gpr[i];

    /* Pack the eight 4-bit CR fields into one word, crf[0] in the most
     * significant nibble. */
    regs.cr = 0;
    for (i = 0; i < 8; i++) {
        regs.cr |= (env->crf[i] & 15) << (4 * (7 - i));
    }

    ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);
    if (ret < 0)
        return ret;

    /* NOTE(review): the return value of kvm_put_fp() is ignored here. */
    kvm_put_fp(cs);

    /* Flush any software-TLB changes accumulated since the last sync. */
    if (env->tlb_dirty) {
        kvm_sw_tlb_put(cpu);
        env->tlb_dirty = false;
    }

    /* Segment-state (SDR1, SLB, SRs, BATs) is only pushed on full
     * resets/restores, not on every synchronization. */
    if (cap_segstate && (level >= KVM_PUT_RESET_STATE)) {
        struct kvm_sregs sregs;

        sregs.pvr = env->spr[SPR_PVR];

        sregs.u.s.sdr1 = env->spr[SPR_SDR1];

        /* Sync SLB */
#ifdef TARGET_PPC64
        for (i = 0; i < ARRAY_SIZE(env->slb); i++) {
            sregs.u.s.ppc64.slb[i].slbe = env->slb[i].esid;
            /* Valid entries also carry their index in the low bits,
             * matching the slbmte RB operand format. */
            if (env->slb[i].esid & SLB_ESID_V) {
                sregs.u.s.ppc64.slb[i].slbe |= i;
            }
            sregs.u.s.ppc64.slb[i].slbv = env->slb[i].vsid;
        }
#endif

        /* Sync SRs */
        for (i = 0; i < 16; i++) {
            sregs.u.s.ppc32.sr[i] = env->sr[i];
        }

        /* Sync BATs */
        for (i = 0; i < 8; i++) {
            /* Beware. We have to swap upper and lower bits here */
            sregs.u.s.ppc32.dbat[i] = ((uint64_t)env->DBAT[0][i] << 32)
                | env->DBAT[1][i];
            sregs.u.s.ppc32.ibat[i] = ((uint64_t)env->IBAT[0][i] << 32)
                | env->IBAT[1][i];
        }

        ret = kvm_vcpu_ioctl(cs, KVM_SET_SREGS, &sregs);
        if (ret) {
            return ret;
        }
    }

    if (cap_hior && (level >= KVM_PUT_RESET_STATE)) {
        kvm_put_one_spr(cs, KVM_REG_PPC_HIOR, SPR_HIOR);
    }

    if (cap_one_reg) {
        int i;

        /* We deliberately ignore errors here, for kernels which have
         * the ONE_REG calls, but don't support the specific
         * registers, there's a reasonable chance things will still
         * work, at least until we try to migrate. */
        for (i = 0; i < 1024; i++) {
            uint64_t id = env->spr_cb[i].one_reg_id;

            if (id != 0) {
                kvm_put_one_spr(cs, id, i);
            }
        }

#ifdef TARGET_PPC64
        if (cap_papr) {
            /* VPA failure is non-fatal for the same best-effort reason
             * as the SPR loop above. */
            if (kvm_put_vpa(cs) < 0) {
                DPRINTF("Warning: Unable to set VPA information to KVM\n");
            }
        }
#endif /* TARGET_PPC64 */
    }

    return ret;
}
879 d76d1650 aurel32
880 20d695a9 Andreas Färber
/*
 * Refresh QEMU's copy of the CPU state from KVM: general registers and
 * FP state always, then (depending on detected capabilities) the BookE
 * sregs, the classic segment/BAT/SLB state, HIOR and the ONE_REG SPRs.
 * Returns 0 on success, negative on ioctl failure.
 */
int kvm_arch_get_registers(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    struct kvm_regs regs;
    struct kvm_sregs sregs;
    uint32_t cr;
    int i, ret;

    ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
    if (ret < 0)
        return ret;

    /* Unpack the packed CR word into the eight 4-bit crf fields,
     * crf[7] coming from the least significant nibble. */
    cr = regs.cr;
    for (i = 7; i >= 0; i--) {
        env->crf[i] = cr & 15;
        cr >>= 4;
    }

    env->ctr = regs.ctr;
    env->lr = regs.lr;
    cpu_write_xer(env, regs.xer);
    env->msr = regs.msr;
    env->nip = regs.pc;

    env->spr[SPR_SRR0] = regs.srr0;
    env->spr[SPR_SRR1] = regs.srr1;

    env->spr[SPR_SPRG0] = regs.sprg0;
    env->spr[SPR_SPRG1] = regs.sprg1;
    env->spr[SPR_SPRG2] = regs.sprg2;
    env->spr[SPR_SPRG3] = regs.sprg3;
    env->spr[SPR_SPRG4] = regs.sprg4;
    env->spr[SPR_SPRG5] = regs.sprg5;
    env->spr[SPR_SPRG6] = regs.sprg6;
    env->spr[SPR_SPRG7] = regs.sprg7;

    env->spr[SPR_BOOKE_PID] = regs.pid;

    for (i = 0;i < 32; i++)
        env->gpr[i] = regs.gpr[i];

    /* NOTE(review): the return value of kvm_get_fp() is ignored here. */
    kvm_get_fp(cs);

    /* BookE: every chunk of sregs content is guarded by a feature flag
     * reported by the kernel, so only supported state is copied. */
    if (cap_booke_sregs) {
        ret = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs);
        if (ret < 0) {
            return ret;
        }

        if (sregs.u.e.features & KVM_SREGS_E_BASE) {
            env->spr[SPR_BOOKE_CSRR0] = sregs.u.e.csrr0;
            env->spr[SPR_BOOKE_CSRR1] = sregs.u.e.csrr1;
            env->spr[SPR_BOOKE_ESR] = sregs.u.e.esr;
            env->spr[SPR_BOOKE_DEAR] = sregs.u.e.dear;
            env->spr[SPR_BOOKE_MCSR] = sregs.u.e.mcsr;
            env->spr[SPR_BOOKE_TSR] = sregs.u.e.tsr;
            env->spr[SPR_BOOKE_TCR] = sregs.u.e.tcr;
            env->spr[SPR_DECR] = sregs.u.e.dec;
            /* The 64-bit timebase is split across the TBL/TBU SPRs. */
            env->spr[SPR_TBL] = sregs.u.e.tb & 0xffffffff;
            env->spr[SPR_TBU] = sregs.u.e.tb >> 32;
            env->spr[SPR_VRSAVE] = sregs.u.e.vrsave;
        }

        if (sregs.u.e.features & KVM_SREGS_E_ARCH206) {
            env->spr[SPR_BOOKE_PIR] = sregs.u.e.pir;
            env->spr[SPR_BOOKE_MCSRR0] = sregs.u.e.mcsrr0;
            env->spr[SPR_BOOKE_MCSRR1] = sregs.u.e.mcsrr1;
            env->spr[SPR_BOOKE_DECAR] = sregs.u.e.decar;
            env->spr[SPR_BOOKE_IVPR] = sregs.u.e.ivpr;
        }

        if (sregs.u.e.features & KVM_SREGS_E_64) {
            env->spr[SPR_BOOKE_EPCR] = sregs.u.e.epcr;
        }

        if (sregs.u.e.features & KVM_SREGS_E_SPRG8) {
            env->spr[SPR_BOOKE_SPRG8] = sregs.u.e.sprg8;
        }

        if (sregs.u.e.features & KVM_SREGS_E_IVOR) {
            env->spr[SPR_BOOKE_IVOR0] = sregs.u.e.ivor_low[0];
            env->spr[SPR_BOOKE_IVOR1] = sregs.u.e.ivor_low[1];
            env->spr[SPR_BOOKE_IVOR2] = sregs.u.e.ivor_low[2];
            env->spr[SPR_BOOKE_IVOR3] = sregs.u.e.ivor_low[3];
            env->spr[SPR_BOOKE_IVOR4] = sregs.u.e.ivor_low[4];
            env->spr[SPR_BOOKE_IVOR5] = sregs.u.e.ivor_low[5];
            env->spr[SPR_BOOKE_IVOR6] = sregs.u.e.ivor_low[6];
            env->spr[SPR_BOOKE_IVOR7] = sregs.u.e.ivor_low[7];
            env->spr[SPR_BOOKE_IVOR8] = sregs.u.e.ivor_low[8];
            env->spr[SPR_BOOKE_IVOR9] = sregs.u.e.ivor_low[9];
            env->spr[SPR_BOOKE_IVOR10] = sregs.u.e.ivor_low[10];
            env->spr[SPR_BOOKE_IVOR11] = sregs.u.e.ivor_low[11];
            env->spr[SPR_BOOKE_IVOR12] = sregs.u.e.ivor_low[12];
            env->spr[SPR_BOOKE_IVOR13] = sregs.u.e.ivor_low[13];
            env->spr[SPR_BOOKE_IVOR14] = sregs.u.e.ivor_low[14];
            env->spr[SPR_BOOKE_IVOR15] = sregs.u.e.ivor_low[15];

            /* The high IVORs (32+) each depend on an extra feature bit. */
            if (sregs.u.e.features & KVM_SREGS_E_SPE) {
                env->spr[SPR_BOOKE_IVOR32] = sregs.u.e.ivor_high[0];
                env->spr[SPR_BOOKE_IVOR33] = sregs.u.e.ivor_high[1];
                env->spr[SPR_BOOKE_IVOR34] = sregs.u.e.ivor_high[2];
            }

            if (sregs.u.e.features & KVM_SREGS_E_PM) {
                env->spr[SPR_BOOKE_IVOR35] = sregs.u.e.ivor_high[3];
            }

            if (sregs.u.e.features & KVM_SREGS_E_PC) {
                env->spr[SPR_BOOKE_IVOR36] = sregs.u.e.ivor_high[4];
                env->spr[SPR_BOOKE_IVOR37] = sregs.u.e.ivor_high[5];
            }
        }

        if (sregs.u.e.features & KVM_SREGS_E_ARCH206_MMU) {
            env->spr[SPR_BOOKE_MAS0] = sregs.u.e.mas0;
            env->spr[SPR_BOOKE_MAS1] = sregs.u.e.mas1;
            env->spr[SPR_BOOKE_MAS2] = sregs.u.e.mas2;
            /* MAS3 and MAS7 share one 64-bit field (mas7_3). */
            env->spr[SPR_BOOKE_MAS3] = sregs.u.e.mas7_3 & 0xffffffff;
            env->spr[SPR_BOOKE_MAS4] = sregs.u.e.mas4;
            env->spr[SPR_BOOKE_MAS6] = sregs.u.e.mas6;
            env->spr[SPR_BOOKE_MAS7] = sregs.u.e.mas7_3 >> 32;
            env->spr[SPR_MMUCFG] = sregs.u.e.mmucfg;
            env->spr[SPR_BOOKE_TLB0CFG] = sregs.u.e.tlbcfg[0];
            env->spr[SPR_BOOKE_TLB1CFG] = sregs.u.e.tlbcfg[1];
        }

        if (sregs.u.e.features & KVM_SREGS_EXP) {
            env->spr[SPR_BOOKE_EPR] = sregs.u.e.epr;
        }

        if (sregs.u.e.features & KVM_SREGS_E_PD) {
            env->spr[SPR_BOOKE_EPLC] = sregs.u.e.eplc;
            env->spr[SPR_BOOKE_EPSC] = sregs.u.e.epsc;
        }

        /* Freescale implementation-specific registers. */
        if (sregs.u.e.impl_id == KVM_SREGS_E_IMPL_FSL) {
            env->spr[SPR_E500_SVR] = sregs.u.e.impl.fsl.svr;
            env->spr[SPR_Exxx_MCAR] = sregs.u.e.impl.fsl.mcar;
            env->spr[SPR_HID0] = sregs.u.e.impl.fsl.hid0;

            if (sregs.u.e.impl.fsl.features & KVM_SREGS_E_FSL_PIDn) {
                env->spr[SPR_BOOKE_PID1] = sregs.u.e.impl.fsl.pid1;
                env->spr[SPR_BOOKE_PID2] = sregs.u.e.impl.fsl.pid2;
            }
        }
    }

    /* Classic/server segment state: SDR1, SLB, SRs and BATs. */
    if (cap_segstate) {
        ret = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs);
        if (ret < 0) {
            return ret;
        }

        /* Don't overwrite SDR1 when the hash table is managed outside
         * of the guest-visible one. */
        if (!env->external_htab) {
            ppc_store_sdr1(env, sregs.u.s.sdr1);
        }

        /* Sync SLB */
#ifdef TARGET_PPC64
        /*
         * The packed SLB array we get from KVM_GET_SREGS only contains
         * information about valid entries. So we flush our internal
         * copy to get rid of stale ones, then put all valid SLB entries
         * back in.
         */
        memset(env->slb, 0, sizeof(env->slb));
        for (i = 0; i < ARRAY_SIZE(env->slb); i++) {
            target_ulong rb = sregs.u.s.ppc64.slb[i].slbe;
            target_ulong rs = sregs.u.s.ppc64.slb[i].slbv;
            /*
             * Only restore valid entries
             */
            if (rb & SLB_ESID_V) {
                ppc_store_slb(env, rb, rs);
            }
        }
#endif

        /* Sync SRs */
        for (i = 0; i < 16; i++) {
            env->sr[i] = sregs.u.s.ppc32.sr[i];
        }

        /* Sync BATs (upper half in DBAT[0]/IBAT[0], lower in [1]) */
        for (i = 0; i < 8; i++) {
            env->DBAT[0][i] = sregs.u.s.ppc32.dbat[i] & 0xffffffff;
            env->DBAT[1][i] = sregs.u.s.ppc32.dbat[i] >> 32;
            env->IBAT[0][i] = sregs.u.s.ppc32.ibat[i] & 0xffffffff;
            env->IBAT[1][i] = sregs.u.s.ppc32.ibat[i] >> 32;
        }
    }

    if (cap_hior) {
        kvm_get_one_spr(cs, KVM_REG_PPC_HIOR, SPR_HIOR);
    }

    if (cap_one_reg) {
        int i;

        /* We deliberately ignore errors here, for kernels which have
         * the ONE_REG calls, but don't support the specific
         * registers, there's a reasonable chance things will still
         * work, at least until we try to migrate. */
        for (i = 0; i < 1024; i++) {
            uint64_t id = env->spr_cb[i].one_reg_id;

            if (id != 0) {
                kvm_get_one_spr(cs, id, i);
            }
        }

#ifdef TARGET_PPC64
        if (cap_papr) {
            if (kvm_get_vpa(cs) < 0) {
                DPRINTF("Warning: Unable to get VPA information from KVM\n");
            }
        }
#endif
    }

    return 0;
}
1103 d76d1650 aurel32
1104 1bc22652 Andreas Färber
/* Raise or lower a vcpu's external interrupt pin through KVM.
 * Only PPC_INTERRUPT_EXT is forwarded; other irq numbers are accepted
 * and ignored.  Always returns 0.
 */
int kvmppc_set_interrupt(PowerPCCPU *cpu, int irq, int level)
{
    unsigned kvm_irq = level ? KVM_INTERRUPT_SET_LEVEL : KVM_INTERRUPT_UNSET;

    /* Only act on the external interrupt line, and only when the kernel
     * can do level-triggered set/unset of it. */
    if (irq == PPC_INTERRUPT_EXT &&
        kvm_enabled() && cap_interrupt_unset && cap_interrupt_level) {
        kvm_vcpu_ioctl(CPU(cpu), KVM_INTERRUPT, &kvm_irq);
    }

    return 0;
}
1120 fc87e185 Alexander Graf
1121 16415335 Alexander Graf
#if defined(TARGET_PPCEMB)
1122 16415335 Alexander Graf
#define PPC_INPUT_INT PPC40x_INPUT_INT
1123 16415335 Alexander Graf
#elif defined(TARGET_PPC64)
1124 16415335 Alexander Graf
#define PPC_INPUT_INT PPC970_INPUT_INT
1125 16415335 Alexander Graf
#else
1126 16415335 Alexander Graf
#define PPC_INPUT_INT PPC6xx_INPUT_INT
1127 16415335 Alexander Graf
#endif
1128 16415335 Alexander Graf
1129 20d695a9 Andreas Färber
void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    unsigned irq = KVM_INTERRUPT_SET;
    int ret;

    /* PowerPC QEMU tracks the various core input pins (interrupt, critical
     * interrupt, reset, etc) in PPC-specific env->irq_input_state.  When the
     * kernel can't handle level interrupts itself, inject here whenever the
     * external pin is raised and KVM is ready to take an interrupt. */
    if (cap_interrupt_level ||
        !run->ready_for_interrupt_injection ||
        !(cs->interrupt_request & CPU_INTERRUPT_HARD) ||
        !(env->irq_input_state & (1 << PPC_INPUT_INT))) {
        /* We don't know if there are more interrupts pending after this.
         * However, the guest will return to userspace in the course of
         * handling this one anyways, so we will get a chance to deliver
         * the rest. */
        return;
    }

    /* For now KVM disregards the 'irq' argument. However, in the
     * future KVM could cache it in-kernel to avoid a heavyweight exit
     * when reading the UIC.
     */
    DPRINTF("injected interrupt %d\n", irq);
    ret = kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &irq);
    if (ret < 0) {
        printf("cpu %d fail inject %x\n", cs->cpu_index, irq);
    }

    /* Always wake up soon in case the interrupt was level based */
    timer_mod(idle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              (get_ticks_per_sec() / 50));
}
1164 d76d1650 aurel32
1165 20d695a9 Andreas Färber
/* No PPC-specific work is needed after the vcpu returns from KVM_RUN. */
void kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
{
}
1168 d76d1650 aurel32
1169 20d695a9 Andreas Färber
/* There are no PPC-specific async events; just report whether the
 * vcpu is currently halted. */
int kvm_arch_process_async_events(CPUState *cs)
{
    return cs->halted;
}
1173 0af691d7 Marcelo Tosatti
1174 259186a7 Andreas Färber
/* The guest executed a halting instruction: idle the vcpu unless an
 * external interrupt is already pending or interrupts are masked
 * (MSR[EE] clear).  Always returns 0. */
static int kvmppc_handle_halt(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    bool irq_pending = cs->interrupt_request & CPU_INTERRUPT_HARD;

    if (msr_ee && !irq_pending) {
        cs->halted = 1;
        env->exception_index = EXCP_HLT;
    }

    return 0;
}
1186 d76d1650 aurel32
1187 d76d1650 aurel32
/* map dcr access to existing qemu dcr emulation */
1188 1328c2bf Andreas Färber
/* Map a DCR read exit onto QEMU's DCR emulation.  An unhandled DCR is
 * reported on stderr but does not fail the exit; always returns 0. */
static int kvmppc_handle_dcr_read(CPUPPCState *env, uint32_t dcrn, uint32_t *data)
{
    /* Braces added per QEMU coding style (all conditional bodies braced). */
    if (ppc_dcr_read(env->dcr_env, dcrn, data) < 0) {
        fprintf(stderr, "Read to unhandled DCR (0x%x)\n", dcrn);
    }

    return 0;
}
1195 d76d1650 aurel32
1196 1328c2bf Andreas Färber
/* Map a DCR write exit onto QEMU's DCR emulation.  An unhandled DCR is
 * reported on stderr but does not fail the exit; always returns 0. */
static int kvmppc_handle_dcr_write(CPUPPCState *env, uint32_t dcrn, uint32_t data)
{
    /* Braces added per QEMU coding style (all conditional bodies braced). */
    if (ppc_dcr_write(env->dcr_env, dcrn, data) < 0) {
        fprintf(stderr, "Write to unhandled DCR (0x%x)\n", dcrn);
    }

    return 0;
}
1203 d76d1650 aurel32
1204 20d695a9 Andreas Färber
/* Dispatch a KVM exit back into QEMU's emulation.
 * Returns 0 to resume the guest, negative to drop to the monitor.
 */
int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    int ret;

    switch (run->exit_reason) {
    case KVM_EXIT_DCR:
        /* Device control register access: forward to the DCR emulation. */
        if (run->dcr.is_write) {
            DPRINTF("handle dcr write\n");
            ret = kvmppc_handle_dcr_write(env, run->dcr.dcrn, run->dcr.data);
        } else {
            DPRINTF("handle dcr read\n");
            ret = kvmppc_handle_dcr_read(env, run->dcr.dcrn, &run->dcr.data);
        }
        break;
    case KVM_EXIT_HLT:
        DPRINTF("handle halt\n");
        ret = kvmppc_handle_halt(cpu);
        break;
#if defined(TARGET_PPC64)
    case KVM_EXIT_PAPR_HCALL:
        /* sPAPR guest hypercall serviced by QEMU itself. */
        DPRINTF("handle PAPR hypercall\n");
        run->papr_hcall.ret = spapr_hypercall(cpu,
                                              run->papr_hcall.nr,
                                              run->papr_hcall.args);
        ret = 0;
        break;
#endif
    case KVM_EXIT_EPR:
        /* External proxy: fetch the interrupt vector from the MPIC IACK. */
        DPRINTF("handle epr\n");
        run->epr.epr = ldl_phys(cs->as, env->mpic_iack);
        ret = 0;
        break;
    case KVM_EXIT_WATCHDOG:
        DPRINTF("handle watchdog expiry\n");
        watchdog_perform_action();
        ret = 0;
        break;

    default:
        fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
        ret = -1;
        break;
    }

    return ret;
}
1252 d76d1650 aurel32
1253 31f2cb8f Bharat Bhushan
/* Ask KVM to OR @tsr_bits into the guest's Timer Status Register.
 * Returns the ioctl result (0 on success). */
int kvmppc_or_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits)
{
    uint32_t val = tsr_bits;
    struct kvm_one_reg reg = {
        .id = KVM_REG_PPC_OR_TSR,
        .addr = (uintptr_t)&val,
    };

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_ONE_REG, &reg);
}
1264 31f2cb8f Bharat Bhushan
1265 31f2cb8f Bharat Bhushan
/* Ask KVM to clear @tsr_bits in the guest's Timer Status Register.
 * Returns the ioctl result (0 on success). */
int kvmppc_clear_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits)
{
    uint32_t val = tsr_bits;
    struct kvm_one_reg reg = {
        .id = KVM_REG_PPC_CLEAR_TSR,
        .addr = (uintptr_t)&val,
    };

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_ONE_REG, &reg);
}
1277 31f2cb8f Bharat Bhushan
1278 31f2cb8f Bharat Bhushan
/* Push QEMU's cached BookE Timer Control Register value into KVM.
 * Returns the ioctl result (0 on success). */
int kvmppc_set_tcr(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    uint32_t tcr_val = env->spr[SPR_BOOKE_TCR];
    struct kvm_one_reg reg = {
        .id = KVM_REG_PPC_TCR,
        .addr = (uintptr_t)&tcr_val,
    };

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_ONE_REG, &reg);
}
1291 31f2cb8f Bharat Bhushan
1292 31f2cb8f Bharat Bhushan
/* Enable the in-kernel BookE watchdog for @cpu.
 * Returns 0 on success, -1 if KVM or the watchdog capability is
 * unavailable, or the (negative) KVM_ENABLE_CAP ioctl result.
 */
int kvmppc_booke_watchdog_enable(PowerPCCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_enable_cap encap = {};
    int ret;

    if (!kvm_enabled()) {
        return -1;
    }

    if (!cap_ppc_watchdog) {
        /* Fix: warning was missing its terminating newline. */
        printf("warning: KVM does not support watchdog\n");
        return -1;
    }

    encap.cap = KVM_CAP_PPC_BOOKE_WATCHDOG;
    ret = kvm_vcpu_ioctl(cs, KVM_ENABLE_CAP, &encap);
    if (ret < 0) {
        fprintf(stderr, "%s: couldn't enable KVM_CAP_PPC_BOOKE_WATCHDOG: %s\n",
                __func__, strerror(-ret));
    }

    return ret;
}
1317 31f2cb8f Bharat Bhushan
1318 dc333cd6 Alexander Graf
/* Scan /proc/cpuinfo for the first line beginning with @field and copy
 * that whole line (truncated to @len bytes, NUL-terminated) into @value.
 * Returns 0 on success, -1 if the file can't be opened or no line matches.
 */
static int read_cpuinfo(const char *field, char *value, int len)
{
    int field_len = strlen(field);
    char line[512];
    int ret = -1;
    FILE *f;

    f = fopen("/proc/cpuinfo", "r");
    if (!f) {
        return -1;
    }

    while (fgets(line, sizeof(line), f)) {
        if (strncmp(line, field, field_len) == 0) {
            pstrcpy(value, len, line);
            ret = 0;
            break;
        }
        if (!*line) {
            /* Preserve the original bail-out on an empty read. */
            break;
        }
    }

    fclose(f);

    return ret;
}
1345 dc333cd6 Alexander Graf
1346 dc333cd6 Alexander Graf
/* Host timebase frequency as reported by /proc/cpuinfo ("timebase : N"),
 * falling back to QEMU's tick rate when it can't be parsed. */
uint32_t kvmppc_get_tbfreq(void)
{
    uint32_t freq = get_ticks_per_sec();
    char line[512];
    char *colon;

    if (read_cpuinfo("timebase", line, sizeof(line))) {
        return freq;
    }

    colon = strchr(line, ':');
    if (!colon) {
        return freq;
    }

    return atoi(colon + 1);
}
1365 4513d923 Gleb Natapov
1366 eadaada1 Alexander Graf
/* Try to find a device tree node for a CPU with clock-frequency property */
1367 eadaada1 Alexander Graf
static int kvmppc_find_cpu_dt(char *buf, int buf_len)
1368 eadaada1 Alexander Graf
{
1369 eadaada1 Alexander Graf
    struct dirent *dirp;
1370 eadaada1 Alexander Graf
    DIR *dp;
1371 eadaada1 Alexander Graf
1372 eadaada1 Alexander Graf
    if ((dp = opendir(PROC_DEVTREE_CPU)) == NULL) {
1373 eadaada1 Alexander Graf
        printf("Can't open directory " PROC_DEVTREE_CPU "\n");
1374 eadaada1 Alexander Graf
        return -1;
1375 eadaada1 Alexander Graf
    }
1376 eadaada1 Alexander Graf
1377 eadaada1 Alexander Graf
    buf[0] = '\0';
1378 eadaada1 Alexander Graf
    while ((dirp = readdir(dp)) != NULL) {
1379 eadaada1 Alexander Graf
        FILE *f;
1380 eadaada1 Alexander Graf
        snprintf(buf, buf_len, "%s%s/clock-frequency", PROC_DEVTREE_CPU,
1381 eadaada1 Alexander Graf
                 dirp->d_name);
1382 eadaada1 Alexander Graf
        f = fopen(buf, "r");
1383 eadaada1 Alexander Graf
        if (f) {
1384 eadaada1 Alexander Graf
            snprintf(buf, buf_len, "%s%s", PROC_DEVTREE_CPU, dirp->d_name);
1385 eadaada1 Alexander Graf
            fclose(f);
1386 eadaada1 Alexander Graf
            break;
1387 eadaada1 Alexander Graf
        }
1388 eadaada1 Alexander Graf
        buf[0] = '\0';
1389 eadaada1 Alexander Graf
    }
1390 eadaada1 Alexander Graf
    closedir(dp);
1391 eadaada1 Alexander Graf
    if (buf[0] == '\0') {
1392 eadaada1 Alexander Graf
        printf("Unknown host!\n");
1393 eadaada1 Alexander Graf
        return -1;
1394 eadaada1 Alexander Graf
    }
1395 eadaada1 Alexander Graf
1396 eadaada1 Alexander Graf
    return 0;
1397 eadaada1 Alexander Graf
}
1398 eadaada1 Alexander Graf
1399 9bc884b7 David Gibson
/* Read a CPU node property from the host device tree that's a single
1400 9bc884b7 David Gibson
 * integer (32-bit or 64-bit).  Returns 0 if anything goes wrong
1401 9bc884b7 David Gibson
 * (can't find or open the property, or doesn't understand the
1402 9bc884b7 David Gibson
 * format) */
1403 9bc884b7 David Gibson
static uint64_t kvmppc_read_int_cpu_dt(const char *propname)
1404 eadaada1 Alexander Graf
{
1405 9bc884b7 David Gibson
    char buf[PATH_MAX];
1406 9bc884b7 David Gibson
    union {
1407 9bc884b7 David Gibson
        uint32_t v32;
1408 9bc884b7 David Gibson
        uint64_t v64;
1409 9bc884b7 David Gibson
    } u;
1410 eadaada1 Alexander Graf
    FILE *f;
1411 eadaada1 Alexander Graf
    int len;
1412 eadaada1 Alexander Graf
1413 eadaada1 Alexander Graf
    if (kvmppc_find_cpu_dt(buf, sizeof(buf))) {
1414 9bc884b7 David Gibson
        return -1;
1415 eadaada1 Alexander Graf
    }
1416 eadaada1 Alexander Graf
1417 9bc884b7 David Gibson
    strncat(buf, "/", sizeof(buf) - strlen(buf));
1418 9bc884b7 David Gibson
    strncat(buf, propname, sizeof(buf) - strlen(buf));
1419 eadaada1 Alexander Graf
1420 eadaada1 Alexander Graf
    f = fopen(buf, "rb");
1421 eadaada1 Alexander Graf
    if (!f) {
1422 eadaada1 Alexander Graf
        return -1;
1423 eadaada1 Alexander Graf
    }
1424 eadaada1 Alexander Graf
1425 9bc884b7 David Gibson
    len = fread(&u, 1, sizeof(u), f);
1426 eadaada1 Alexander Graf
    fclose(f);
1427 eadaada1 Alexander Graf
    switch (len) {
1428 9bc884b7 David Gibson
    case 4:
1429 9bc884b7 David Gibson
        /* property is a 32-bit quantity */
1430 9bc884b7 David Gibson
        return be32_to_cpu(u.v32);
1431 9bc884b7 David Gibson
    case 8:
1432 9bc884b7 David Gibson
        return be64_to_cpu(u.v64);
1433 eadaada1 Alexander Graf
    }
1434 eadaada1 Alexander Graf
1435 eadaada1 Alexander Graf
    return 0;
1436 eadaada1 Alexander Graf
}
1437 eadaada1 Alexander Graf
1438 9bc884b7 David Gibson
/* Host CPU clock frequency from the device tree. */
uint64_t kvmppc_get_clockfreq(void)
{
    return kvmppc_read_int_cpu_dt("clock-frequency");
}
1442 9bc884b7 David Gibson
1443 6659394f David Gibson
/* Host "ibm,vmx" (AltiVec/VSX level) property from the device tree. */
uint32_t kvmppc_get_vmx(void)
{
    return kvmppc_read_int_cpu_dt("ibm,vmx");
}
1447 6659394f David Gibson
1448 6659394f David Gibson
/* Host "ibm,dfp" (decimal floating point) property from the device tree. */
uint32_t kvmppc_get_dfp(void)
{
    return kvmppc_read_int_cpu_dt("ibm,dfp");
}
1452 6659394f David Gibson
1453 1a61a9ae Stuart Yoder
/* Fetch KVM's paravirt info block.  Returns 0 (filling @pvinfo) on
 * success, 1 when the capability or ioctl is unavailable. */
static int kvmppc_get_pvinfo(CPUPPCState *env, struct kvm_ppc_pvinfo *pvinfo)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));

    if (kvm_check_extension(cs->kvm_state, KVM_CAP_PPC_GET_PVINFO) &&
        !kvm_vm_ioctl(cs->kvm_state, KVM_PPC_GET_PVINFO, pvinfo)) {
        return 0;
    }

    return 1;
}
1465 1a61a9ae Stuart Yoder
1466 1a61a9ae Stuart Yoder
/* Does the host KVM advertise the EV_IDLE paravirt hcall?
 * Returns 1 if so, 0 otherwise. */
int kvmppc_get_hasidle(CPUPPCState *env)
{
    struct kvm_ppc_pvinfo pvinfo;

    return (!kvmppc_get_pvinfo(env, &pvinfo) &&
            (pvinfo.flags & KVM_PPC_PVINFO_FLAGS_EV_IDLE)) ? 1 : 0;
}
1477 1a61a9ae Stuart Yoder
1478 1328c2bf Andreas Färber
/* Fill @buf with the guest instruction sequence for making a hypercall,
 * preferring the sequence KVM reports; otherwise emit a stub that always
 * returns failure.  Always returns 0.
 * NOTE(review): the fallback writes 16 bytes regardless of @buf_len --
 * callers appear expected to pass a buffer of at least 16 bytes; confirm.
 */
int kvmppc_get_hypercall(CPUPPCState *env, uint8_t *buf, int buf_len)
{
    struct kvm_ppc_pvinfo pvinfo;
    uint32_t *insns = (uint32_t *)buf;

    if (!kvmppc_get_pvinfo(env, &pvinfo)) {
        memcpy(buf, pvinfo.hcall, buf_len);
        return 0;
    }

    /*
     * Fallback to always fail hypercalls:
     *
     *     li r3, -1
     *     nop
     *     nop
     *     nop
     */

    insns[0] = 0x3860ffff;   /* li r3, -1 */
    insns[1] = 0x60000000;   /* nop */
    insns[2] = 0x60000000;   /* nop */
    insns[3] = 0x60000000;   /* nop */

    return 0;
}
1504 45024f09 Alexander Graf
1505 1bc22652 Andreas Färber
/* Switch the vcpu into PAPR (sPAPR guest) mode.  Aborts if the host
 * kernel lacks KVM_CAP_PPC_PAPR. */
void kvmppc_set_papr(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    struct kvm_enable_cap cap = { .cap = KVM_CAP_PPC_PAPR };

    if (kvm_vcpu_ioctl(CPU(cpu), KVM_ENABLE_CAP, &cap) != 0) {
        cpu_abort(env, "This KVM version does not support PAPR\n");
    }

    /* Update the capability flag so we sync the right information
     * with kvm */
    cap_papr = 1;
}
1523 f61b4bed Alexander Graf
1524 5b95b8b9 Alexander Graf
/* Configure KVM's MPIC EPR (interrupt-ack-by-SPR) proxying for the vcpu.
 * Aborts only when proxying was requested but the kernel can't do it. */
void kvmppc_set_mpic_proxy(PowerPCCPU *cpu, int mpic_proxy)
{
    CPUPPCState *env = &cpu->env;
    struct kvm_enable_cap cap = {
        .cap = KVM_CAP_PPC_EPR,
        .args[0] = mpic_proxy,
    };

    if (kvm_vcpu_ioctl(CPU(cpu), KVM_ENABLE_CAP, &cap) && mpic_proxy) {
        cpu_abort(env, "This KVM version does not support EPR\n");
    }
}
1539 5b95b8b9 Alexander Graf
1540 e97c3636 David Gibson
int kvmppc_smt_threads(void)
1541 e97c3636 David Gibson
{
1542 e97c3636 David Gibson
    return cap_ppc_smt ? cap_ppc_smt : 1;
1543 e97c3636 David Gibson
}
1544 e97c3636 David Gibson
1545 7f763a5d David Gibson
#ifdef TARGET_PPC64
1546 354ac20a David Gibson
/* Allocate a contiguous Real Mode Area through KVM and map it at guest
 * physical address 0.  Returns the RMA size in bytes, 0 when contiguous
 * allocation isn't required on this host, or -1 on failure.
 */
off_t kvmppc_alloc_rma(const char *name, MemoryRegion *sysmem)
{
    void *rma;
    off_t size;
    int fd;
    struct kvm_allocate_rma ret;
    MemoryRegion *rma_region;

    /* If cap_ppc_rma == 0, contiguous RMA allocation is not supported
     * if cap_ppc_rma == 1, contiguous RMA allocation is supported, but
     *                      not necessary on this hardware
     * if cap_ppc_rma == 2, contiguous RMA allocation is needed on this hardware
     *
     * FIXME: We should allow the user to force contiguous RMA
     * allocation in the cap_ppc_rma==1 case.
     */
    if (cap_ppc_rma < 2) {
        return 0;
    }

    fd = kvm_vm_ioctl(kvm_state, KVM_ALLOCATE_RMA, &ret);
    if (fd < 0) {
        fprintf(stderr, "KVM: Error on KVM_ALLOCATE_RMA: %s\n",
                strerror(errno));
        return -1;
    }

    size = MIN(ret.rma_size, 256ul << 20);

    rma = mmap(NULL, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
    if (rma == MAP_FAILED) {
        fprintf(stderr, "KVM: Error mapping RMA: %s\n", strerror(errno));
        return -1;
    }    /* Fix: dropped stray ';' that followed this brace. */

    rma_region = g_new(MemoryRegion, 1);
    memory_region_init_ram_ptr(rma_region, NULL, name, size, rma);
    vmstate_register_ram_global(rma_region);
    memory_region_add_subregion(sysmem, 0, rma_region);

    return size;
}
1588 354ac20a David Gibson
1589 7f763a5d David Gibson
uint64_t kvmppc_rma_size(uint64_t current_size, unsigned int hash_shift)
1590 7f763a5d David Gibson
{
1591 f36951c1 David Gibson
    struct kvm_ppc_smmu_info info;
1592 f36951c1 David Gibson
    long rampagesize, best_page_shift;
1593 f36951c1 David Gibson
    int i;
1594 f36951c1 David Gibson
1595 7f763a5d David Gibson
    if (cap_ppc_rma >= 2) {
1596 7f763a5d David Gibson
        return current_size;
1597 7f763a5d David Gibson
    }
1598 f36951c1 David Gibson
1599 f36951c1 David Gibson
    /* Find the largest hardware supported page size that's less than
1600 f36951c1 David Gibson
     * or equal to the (logical) backing page size of guest RAM */
1601 182735ef Andreas Färber
    kvm_get_smmu_info(POWERPC_CPU(first_cpu), &info);
1602 f36951c1 David Gibson
    rampagesize = getrampagesize();
1603 f36951c1 David Gibson
    best_page_shift = 0;
1604 f36951c1 David Gibson
1605 f36951c1 David Gibson
    for (i = 0; i < KVM_PPC_PAGE_SIZES_MAX_SZ; i++) {
1606 f36951c1 David Gibson
        struct kvm_ppc_one_seg_page_size *sps = &info.sps[i];
1607 f36951c1 David Gibson
1608 f36951c1 David Gibson
        if (!sps->page_shift) {
1609 f36951c1 David Gibson
            continue;
1610 f36951c1 David Gibson
        }
1611 f36951c1 David Gibson
1612 f36951c1 David Gibson
        if ((sps->page_shift > best_page_shift)
1613 f36951c1 David Gibson
            && ((1UL << sps->page_shift) <= rampagesize)) {
1614 f36951c1 David Gibson
            best_page_shift = sps->page_shift;
1615 f36951c1 David Gibson
        }
1616 f36951c1 David Gibson
    }
1617 f36951c1 David Gibson
1618 7f763a5d David Gibson
    return MIN(current_size,
1619 f36951c1 David Gibson
               1ULL << (best_page_shift + hash_shift - 7));
1620 7f763a5d David Gibson
}
1621 7f763a5d David Gibson
#endif
1622 7f763a5d David Gibson
1623 0f5cb298 David Gibson
void *kvmppc_create_spapr_tce(uint32_t liobn, uint32_t window_size, int *pfd)
1624 0f5cb298 David Gibson
{
1625 0f5cb298 David Gibson
    struct kvm_create_spapr_tce args = {
1626 0f5cb298 David Gibson
        .liobn = liobn,
1627 0f5cb298 David Gibson
        .window_size = window_size,
1628 0f5cb298 David Gibson
    };
1629 0f5cb298 David Gibson
    long len;
1630 0f5cb298 David Gibson
    int fd;
1631 0f5cb298 David Gibson
    void *table;
1632 0f5cb298 David Gibson
1633 b5aec396 David Gibson
    /* Must set fd to -1 so we don't try to munmap when called for
1634 b5aec396 David Gibson
     * destroying the table, which the upper layers -will- do
1635 b5aec396 David Gibson
     */
1636 b5aec396 David Gibson
    *pfd = -1;
1637 0f5cb298 David Gibson
    if (!cap_spapr_tce) {
1638 0f5cb298 David Gibson
        return NULL;
1639 0f5cb298 David Gibson
    }
1640 0f5cb298 David Gibson
1641 0f5cb298 David Gibson
    fd = kvm_vm_ioctl(kvm_state, KVM_CREATE_SPAPR_TCE, &args);
1642 0f5cb298 David Gibson
    if (fd < 0) {
1643 b5aec396 David Gibson
        fprintf(stderr, "KVM: Failed to create TCE table for liobn 0x%x\n",
1644 b5aec396 David Gibson
                liobn);
1645 0f5cb298 David Gibson
        return NULL;
1646 0f5cb298 David Gibson
    }
1647 0f5cb298 David Gibson
1648 a83000f5 Anthony Liguori
    len = (window_size / SPAPR_TCE_PAGE_SIZE) * sizeof(uint64_t);
1649 0f5cb298 David Gibson
    /* FIXME: round this up to page size */
1650 0f5cb298 David Gibson
1651 74b41e56 David Gibson
    table = mmap(NULL, len, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
1652 0f5cb298 David Gibson
    if (table == MAP_FAILED) {
1653 b5aec396 David Gibson
        fprintf(stderr, "KVM: Failed to map TCE table for liobn 0x%x\n",
1654 b5aec396 David Gibson
                liobn);
1655 0f5cb298 David Gibson
        close(fd);
1656 0f5cb298 David Gibson
        return NULL;
1657 0f5cb298 David Gibson
    }
1658 0f5cb298 David Gibson
1659 0f5cb298 David Gibson
    *pfd = fd;
1660 0f5cb298 David Gibson
    return table;
1661 0f5cb298 David Gibson
}
1662 0f5cb298 David Gibson
1663 0f5cb298 David Gibson
int kvmppc_remove_spapr_tce(void *table, int fd, uint32_t window_size)
1664 0f5cb298 David Gibson
{
1665 0f5cb298 David Gibson
    long len;
1666 0f5cb298 David Gibson
1667 0f5cb298 David Gibson
    if (fd < 0) {
1668 0f5cb298 David Gibson
        return -1;
1669 0f5cb298 David Gibson
    }
1670 0f5cb298 David Gibson
1671 a83000f5 Anthony Liguori
    len = (window_size / SPAPR_TCE_PAGE_SIZE)*sizeof(uint64_t);
1672 0f5cb298 David Gibson
    if ((munmap(table, len) < 0) ||
1673 0f5cb298 David Gibson
        (close(fd) < 0)) {
1674 b5aec396 David Gibson
        fprintf(stderr, "KVM: Unexpected error removing TCE table: %s",
1675 b5aec396 David Gibson
                strerror(errno));
1676 0f5cb298 David Gibson
        /* Leak the table */
1677 0f5cb298 David Gibson
    }
1678 0f5cb298 David Gibson
1679 0f5cb298 David Gibson
    return 0;
1680 0f5cb298 David Gibson
}
1681 0f5cb298 David Gibson
1682 7f763a5d David Gibson
int kvmppc_reset_htab(int shift_hint)
1683 7f763a5d David Gibson
{
1684 7f763a5d David Gibson
    uint32_t shift = shift_hint;
1685 7f763a5d David Gibson
1686 ace9a2cb David Gibson
    if (!kvm_enabled()) {
1687 ace9a2cb David Gibson
        /* Full emulation, tell caller to allocate htab itself */
1688 ace9a2cb David Gibson
        return 0;
1689 ace9a2cb David Gibson
    }
1690 ace9a2cb David Gibson
    if (kvm_check_extension(kvm_state, KVM_CAP_PPC_ALLOC_HTAB)) {
1691 7f763a5d David Gibson
        int ret;
1692 7f763a5d David Gibson
        ret = kvm_vm_ioctl(kvm_state, KVM_PPC_ALLOCATE_HTAB, &shift);
1693 ace9a2cb David Gibson
        if (ret == -ENOTTY) {
1694 ace9a2cb David Gibson
            /* At least some versions of PR KVM advertise the
1695 ace9a2cb David Gibson
             * capability, but don't implement the ioctl().  Oops.
1696 ace9a2cb David Gibson
             * Return 0 so that we allocate the htab in qemu, as is
1697 ace9a2cb David Gibson
             * correct for PR. */
1698 ace9a2cb David Gibson
            return 0;
1699 ace9a2cb David Gibson
        } else if (ret < 0) {
1700 7f763a5d David Gibson
            return ret;
1701 7f763a5d David Gibson
        }
1702 7f763a5d David Gibson
        return shift;
1703 7f763a5d David Gibson
    }
1704 7f763a5d David Gibson
1705 ace9a2cb David Gibson
    /* We have a kernel that predates the htab reset calls.  For PR
1706 ace9a2cb David Gibson
     * KVM, we need to allocate the htab ourselves, for an HV KVM of
1707 ace9a2cb David Gibson
     * this era, it has allocated a 16MB fixed size hash table
1708 ace9a2cb David Gibson
     * already.  Kernels of this era have the GET_PVINFO capability
1709 ace9a2cb David Gibson
     * only on PR, so we use this hack to determine the right
1710 ace9a2cb David Gibson
     * answer */
1711 ace9a2cb David Gibson
    if (kvm_check_extension(kvm_state, KVM_CAP_PPC_GET_PVINFO)) {
1712 ace9a2cb David Gibson
        /* PR - tell caller to allocate htab */
1713 ace9a2cb David Gibson
        return 0;
1714 ace9a2cb David Gibson
    } else {
1715 ace9a2cb David Gibson
        /* HV - assume 16MB kernel allocated htab */
1716 ace9a2cb David Gibson
        return 24;
1717 ace9a2cb David Gibson
    }
1718 7f763a5d David Gibson
}
1719 7f763a5d David Gibson
1720 a1e98583 David Gibson
static inline uint32_t mfpvr(void)
1721 a1e98583 David Gibson
{
1722 a1e98583 David Gibson
    uint32_t pvr;
1723 a1e98583 David Gibson
1724 a1e98583 David Gibson
    asm ("mfpvr %0"
1725 a1e98583 David Gibson
         : "=r"(pvr));
1726 a1e98583 David Gibson
    return pvr;
1727 a1e98583 David Gibson
}
1728 a1e98583 David Gibson
1729 a7342588 David Gibson
/* Set (on == true) or clear (on == false) the given flag bits in *word. */
static void alter_insns(uint64_t *word, uint64_t flags, bool on)
{
    *word = on ? (*word | flags) : (*word & ~flags);
}
1737 a7342588 David Gibson
1738 2985b86b Andreas Färber
/* Instance init for the "host" CPU type: it only makes sense under KVM. */
static void kvmppc_host_cpu_initfn(Object *obj)
{
    assert(kvm_enabled());
}
1742 2985b86b Andreas Färber
1743 2985b86b Andreas Färber
/*
 * Class init for the "host" CPU type: overwrite the class template with
 * values queried from the real host CPU (PVR, VMX/DFP instruction flags,
 * L1 cache sizes from the device tree).
 */
static void kvmppc_host_cpu_class_init(ObjectClass *oc, void *data)
{
    PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
    uint32_t vmx = kvmppc_get_vmx();
    uint32_t dfp = kvmppc_get_dfp();
    uint32_t dcache_size = kvmppc_read_int_cpu_dt("d-cache-size");
    uint32_t icache_size = kvmppc_read_int_cpu_dt("i-cache-size");

    /* Now fix up the class with information we can query from the host */
    pcc->pvr = mfpvr();

    /* NOTE(review): vmx/dfp are uint32_t compared against -1; this assumes
     * the kvmppc_get_* helpers return (uint32_t)-1 when the value is
     * unknown — confirm against their definitions. */
    if (vmx != -1) {
        /* Only override when we know what the host supports */
        alter_insns(&pcc->insns_flags, PPC_ALTIVEC, vmx > 0);
        alter_insns(&pcc->insns_flags2, PPC2_VSX, vmx > 1);
    }
    if (dfp != -1) {
        /* Only override when we know what the host supports */
        alter_insns(&pcc->insns_flags2, PPC2_DFP, dfp);
    }

    if (dcache_size != -1) {
        pcc->l1_dcache_size = dcache_size;
    }

    if (icache_size != -1) {
        pcc->l1_icache_size = icache_size;
    }
}
1772 a1e98583 David Gibson
1773 55e5c285 Andreas Färber
/*
 * Remap the CPU index from QEMU's smp_threads-based numbering onto the
 * kernel's SMT thread numbering, so vcpu ids line up with KVM's view.
 * Always returns 0.
 */
int kvmppc_fixup_cpu(PowerPCCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    int threads_per_core = kvmppc_smt_threads();
    int core = cs->cpu_index / smp_threads;
    int thread = cs->cpu_index % smp_threads;

    cs->cpu_index = core * threads_per_core + thread;

    return 0;
}
1785 12b1143b David Gibson
1786 3b961124 Stuart Yoder
/* Report the cached availability of the KVM EPR capability. */
bool kvmppc_has_cap_epr(void)
{
    return cap_epr;
}
1790 3b961124 Stuart Yoder
1791 7c43bca0 Aneesh Kumar K.V
/* Report the cached availability of the KVM hash-table fd capability. */
bool kvmppc_has_cap_htab_fd(void)
{
    return cap_htab_fd;
}
1795 7c43bca0 Aneesh Kumar K.V
1796 5ba4576b Andreas Färber
static int kvm_ppc_register_host_cpu_type(void)
1797 5ba4576b Andreas Färber
{
1798 5ba4576b Andreas Färber
    TypeInfo type_info = {
1799 5ba4576b Andreas Färber
        .name = TYPE_HOST_POWERPC_CPU,
1800 5ba4576b Andreas Färber
        .instance_init = kvmppc_host_cpu_initfn,
1801 5ba4576b Andreas Färber
        .class_init = kvmppc_host_cpu_class_init,
1802 5ba4576b Andreas Färber
    };
1803 5ba4576b Andreas Färber
    uint32_t host_pvr = mfpvr();
1804 5ba4576b Andreas Färber
    PowerPCCPUClass *pvr_pcc;
1805 5ba4576b Andreas Färber
1806 5ba4576b Andreas Färber
    pvr_pcc = ppc_cpu_class_by_pvr(host_pvr);
1807 5ba4576b Andreas Färber
    if (pvr_pcc == NULL) {
1808 3bc9ccc0 Alexey Kardashevskiy
        pvr_pcc = ppc_cpu_class_by_pvr_mask(host_pvr);
1809 3bc9ccc0 Alexey Kardashevskiy
    }
1810 3bc9ccc0 Alexey Kardashevskiy
    if (pvr_pcc == NULL) {
1811 5ba4576b Andreas Färber
        return -1;
1812 5ba4576b Andreas Färber
    }
1813 5ba4576b Andreas Färber
    type_info.parent = object_class_get_name(OBJECT_CLASS(pvr_pcc));
1814 5ba4576b Andreas Färber
    type_register(&type_info);
1815 5ba4576b Andreas Färber
    return 0;
1816 5ba4576b Andreas Färber
}
1817 5ba4576b Andreas Färber
1818 feaa64c4 David Gibson
int kvmppc_define_rtas_kernel_token(uint32_t token, const char *function)
1819 feaa64c4 David Gibson
{
1820 feaa64c4 David Gibson
    struct kvm_rtas_token_args args = {
1821 feaa64c4 David Gibson
        .token = token,
1822 feaa64c4 David Gibson
    };
1823 feaa64c4 David Gibson
1824 feaa64c4 David Gibson
    if (!kvm_check_extension(kvm_state, KVM_CAP_PPC_RTAS)) {
1825 feaa64c4 David Gibson
        return -ENOENT;
1826 feaa64c4 David Gibson
    }
1827 feaa64c4 David Gibson
1828 feaa64c4 David Gibson
    strncpy(args.name, function, sizeof(args.name));
1829 feaa64c4 David Gibson
1830 feaa64c4 David Gibson
    return kvm_vm_ioctl(kvm_state, KVM_PPC_RTAS_DEFINE_TOKEN, &args);
1831 feaa64c4 David Gibson
}
1832 12b1143b David Gibson
1833 e68cb8b4 Alexey Kardashevskiy
int kvmppc_get_htab_fd(bool write)
1834 e68cb8b4 Alexey Kardashevskiy
{
1835 e68cb8b4 Alexey Kardashevskiy
    struct kvm_get_htab_fd s = {
1836 e68cb8b4 Alexey Kardashevskiy
        .flags = write ? KVM_GET_HTAB_WRITE : 0,
1837 e68cb8b4 Alexey Kardashevskiy
        .start_index = 0,
1838 e68cb8b4 Alexey Kardashevskiy
    };
1839 e68cb8b4 Alexey Kardashevskiy
1840 e68cb8b4 Alexey Kardashevskiy
    if (!cap_htab_fd) {
1841 e68cb8b4 Alexey Kardashevskiy
        fprintf(stderr, "KVM version doesn't support saving the hash table\n");
1842 e68cb8b4 Alexey Kardashevskiy
        return -1;
1843 e68cb8b4 Alexey Kardashevskiy
    }
1844 e68cb8b4 Alexey Kardashevskiy
1845 e68cb8b4 Alexey Kardashevskiy
    return kvm_vm_ioctl(kvm_state, KVM_PPC_GET_HTAB_FD, &s);
1846 e68cb8b4 Alexey Kardashevskiy
}
1847 e68cb8b4 Alexey Kardashevskiy
1848 e68cb8b4 Alexey Kardashevskiy
/*
 * Stream the guest hash page table from the KVM htab fd into the
 * migration stream, in bufsize-byte reads, stopping at EOF or after
 * roughly max_ns nanoseconds (no time limit when max_ns < 0).
 *
 * Returns 1 when the whole table was read (EOF reached), 0 when it
 * stopped early on the time limit, or -1 if read() failed (the error is
 * also printed to stderr).
 */
int kvmppc_save_htab(QEMUFile *f, int fd, size_t bufsize, int64_t max_ns)
{
    int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    uint8_t buf[bufsize];
    ssize_t rc;

    do {
        rc = read(fd, buf, bufsize);
        if (rc < 0) {
            fprintf(stderr, "Error reading data from KVM HTAB fd: %s\n",
                    strerror(errno));
            return rc;
        } else if (rc) {
            /* Kernel already returns data in BE format for the file */
            qemu_put_buffer(f, buf, rc);
        }
    } while ((rc != 0)
             && ((max_ns < 0)
                 || ((qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) < max_ns)));

    return (rc == 0) ? 1 : 0;
}
1870 e68cb8b4 Alexey Kardashevskiy
1871 e68cb8b4 Alexey Kardashevskiy
/*
 * Read one hash-table chunk (header + n_valid HPTEs) from the migration
 * stream and push it into the kernel through the htab fd.
 *
 * Returns 0 on success, -1 on a short write, or write()'s negative
 * return on error (errors are also printed to stderr).
 */
int kvmppc_load_htab_chunk(QEMUFile *f, int fd, uint32_t index,
                           uint16_t n_valid, uint16_t n_invalid)
{
    struct kvm_get_htab_header *buf;
    size_t chunksize = sizeof(*buf) + n_valid * HASH_PTE_SIZE_64;
    ssize_t rc;

    /* n_valid comes from the migration stream (untrusted input); use a
     * heap allocation rather than alloca() so an oversized count cannot
     * overflow the stack. */
    buf = g_malloc(chunksize);
    /* This is KVM on ppc, so this is all big-endian */
    buf->index = index;
    buf->n_valid = n_valid;
    buf->n_invalid = n_invalid;

    qemu_get_buffer(f, (void *)(buf + 1), HASH_PTE_SIZE_64 * n_valid);

    rc = write(fd, buf, chunksize);
    g_free(buf);

    if (rc < 0) {
        fprintf(stderr, "Error writing KVM hash table: %s\n",
                strerror(errno));
        return rc;
    }
    if (rc != chunksize) {
        /* We should never get a short write on a single chunk */
        fprintf(stderr, "Short write, restoring KVM hash table\n");
        return -1;
    }
    return 0;
}
1899 e68cb8b4 Alexey Kardashevskiy
1900 20d695a9 Andreas Färber
/* Always stop the vcpu when KVM reports an emulation failure. */
bool kvm_arch_stop_on_emulation_error(CPUState *cpu)
{
    return true;
}
1904 a1b87fe0 Jan Kiszka
1905 20d695a9 Andreas Färber
/* No arch-specific SIGBUS handling for a vcpu thread on ppc; return
 * non-zero so the generic code treats it as unhandled. */
int kvm_arch_on_sigbus_vcpu(CPUState *cpu, int code, void *addr)
{
    return 1;
}
1909 a1b87fe0 Jan Kiszka
1910 a1b87fe0 Jan Kiszka
/* No arch-specific process-wide SIGBUS handling on ppc; return non-zero
 * so the generic code treats it as unhandled. */
int kvm_arch_on_sigbus(int code, void *addr)
{
    return 1;
}
1914 82169660 Scott Wood
1915 82169660 Scott Wood
/* No architecture-specific IRQ routing setup is needed on ppc. */
void kvm_arch_init_irq_routing(KVMState *s)
{
}
1918 c65f9a07 Greg Kurz
1919 c65f9a07 Greg Kurz
/* Software breakpoints are not implemented for ppc KVM guest debug. */
int kvm_arch_insert_sw_breakpoint(CPUState *cpu, struct kvm_sw_breakpoint *bp)
{
    return -EINVAL;
}
1923 c65f9a07 Greg Kurz
1924 c65f9a07 Greg Kurz
/* Software breakpoints are not implemented for ppc KVM guest debug. */
int kvm_arch_remove_sw_breakpoint(CPUState *cpu, struct kvm_sw_breakpoint *bp)
{
    return -EINVAL;
}
1928 c65f9a07 Greg Kurz
1929 c65f9a07 Greg Kurz
/* Hardware breakpoints are not implemented for ppc KVM guest debug. */
int kvm_arch_insert_hw_breakpoint(target_ulong addr, target_ulong len, int type)
{
    return -EINVAL;
}
1933 c65f9a07 Greg Kurz
1934 c65f9a07 Greg Kurz
/* Hardware breakpoints are not implemented for ppc KVM guest debug. */
int kvm_arch_remove_hw_breakpoint(target_ulong addr, target_ulong len, int type)
{
    return -EINVAL;
}
1938 c65f9a07 Greg Kurz
1939 c65f9a07 Greg Kurz
/* Nothing to do: hardware breakpoints are not implemented on ppc KVM. */
void kvm_arch_remove_all_hw_breakpoints(void)
{
}
1942 c65f9a07 Greg Kurz
1943 c65f9a07 Greg Kurz
/* Nothing to do: guest debug state is not implemented on ppc KVM. */
void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
{
}
1946 7c43bca0 Aneesh Kumar K.V
1947 7c43bca0 Aneesh Kumar K.V
/*
 * Buffer layout for reading one hash PTE group through the KVM htab fd:
 * the kernel's chunk header followed by the HPTE data itself.
 */
struct kvm_get_htab_buf {
    struct kvm_get_htab_header header;
    /*
     * We require one extra byte for read
     * NOTE(review): "byte" is misleading — this reserves one extra
     * target_ulong entry beyond the 2 words per HPTE of a full group.
     */
    target_ulong hpte[(HPTES_PER_GROUP * 2) + 1];
};
1954 7c43bca0 Aneesh Kumar K.V
1955 7c43bca0 Aneesh Kumar K.V
/*
 * Read the hash PTE group containing pte_index through a fresh KVM htab
 * fd (opened read-only at start_index = pte_index).
 *
 * On success returns a "token": the address of the hpte[] array inside a
 * heap-allocated kvm_get_htab_buf.  Ownership transfers to the caller,
 * who must release it with kvmppc_hash64_free_pteg().  Returns 0 on any
 * failure (fd could not be opened, or read() failed).
 */
uint64_t kvmppc_hash64_read_pteg(PowerPCCPU *cpu, target_ulong pte_index)
{
    int htab_fd;
    struct kvm_get_htab_fd ghf;
    struct kvm_get_htab_buf  *hpte_buf;

    ghf.flags = 0;
    ghf.start_index = pte_index;
    htab_fd = kvm_vm_ioctl(kvm_state, KVM_PPC_GET_HTAB_FD, &ghf);
    if (htab_fd < 0) {
        goto error_out;
    }

    hpte_buf = g_malloc0(sizeof(*hpte_buf));
    /*
     * Read the hpte group
     */
    if (read(htab_fd, hpte_buf, sizeof(*hpte_buf)) < 0) {
        goto out_close;
    }

    close(htab_fd);
    /* Return the data area; the chunk header sits just in front of it. */
    return (uint64_t)(uintptr_t) hpte_buf->hpte;

out_close:
    g_free(hpte_buf);
    close(htab_fd);
error_out:
    return 0;
}
1985 7c43bca0 Aneesh Kumar K.V
1986 7c43bca0 Aneesh Kumar K.V
void kvmppc_hash64_free_pteg(uint64_t token)
1987 7c43bca0 Aneesh Kumar K.V
{
1988 7c43bca0 Aneesh Kumar K.V
    struct kvm_get_htab_buf *htab_buf;
1989 7c43bca0 Aneesh Kumar K.V
1990 7c43bca0 Aneesh Kumar K.V
    htab_buf = container_of((void *)(uintptr_t) token, struct kvm_get_htab_buf,
1991 7c43bca0 Aneesh Kumar K.V
                            hpte);
1992 7c43bca0 Aneesh Kumar K.V
    g_free(htab_buf);
1993 7c43bca0 Aneesh Kumar K.V
    return;
1994 7c43bca0 Aneesh Kumar K.V
}