root / target-ppc / kvm.c @ be40edcd
History | View | Annotate | Download (23.2 kB)
1 | d76d1650 | aurel32 | /*
|
---|---|---|---|
2 | d76d1650 | aurel32 | * PowerPC implementation of KVM hooks
|
3 | d76d1650 | aurel32 | *
|
4 | d76d1650 | aurel32 | * Copyright IBM Corp. 2007
|
5 | 90dc8812 | Scott Wood | * Copyright (C) 2011 Freescale Semiconductor, Inc.
|
6 | d76d1650 | aurel32 | *
|
7 | d76d1650 | aurel32 | * Authors:
|
8 | d76d1650 | aurel32 | * Jerone Young <jyoung5@us.ibm.com>
|
9 | d76d1650 | aurel32 | * Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
|
10 | d76d1650 | aurel32 | * Hollis Blanchard <hollisb@us.ibm.com>
|
11 | d76d1650 | aurel32 | *
|
12 | d76d1650 | aurel32 | * This work is licensed under the terms of the GNU GPL, version 2 or later.
|
13 | d76d1650 | aurel32 | * See the COPYING file in the top-level directory.
|
14 | d76d1650 | aurel32 | *
|
15 | d76d1650 | aurel32 | */
|
16 | d76d1650 | aurel32 | |
17 | eadaada1 | Alexander Graf | #include <dirent.h> |
18 | d76d1650 | aurel32 | #include <sys/types.h> |
19 | d76d1650 | aurel32 | #include <sys/ioctl.h> |
20 | d76d1650 | aurel32 | #include <sys/mman.h> |
21 | d76d1650 | aurel32 | |
22 | d76d1650 | aurel32 | #include <linux/kvm.h> |
23 | d76d1650 | aurel32 | |
24 | d76d1650 | aurel32 | #include "qemu-common.h" |
25 | d76d1650 | aurel32 | #include "qemu-timer.h" |
26 | d76d1650 | aurel32 | #include "sysemu.h" |
27 | d76d1650 | aurel32 | #include "kvm.h" |
28 | d76d1650 | aurel32 | #include "kvm_ppc.h" |
29 | d76d1650 | aurel32 | #include "cpu.h" |
30 | d76d1650 | aurel32 | #include "device_tree.h" |
31 | 0f5cb298 | David Gibson | #include "hw/sysbus.h" |
32 | e97c3636 | David Gibson | #include "hw/spapr.h" |
33 | d76d1650 | aurel32 | |
34 | f61b4bed | Alexander Graf | #include "hw/sysbus.h" |
35 | f61b4bed | Alexander Graf | #include "hw/spapr.h" |
36 | f61b4bed | Alexander Graf | #include "hw/spapr_vio.h" |
37 | f61b4bed | Alexander Graf | |
//#define DEBUG_KVM

#ifdef DEBUG_KVM
/* Debug tracing to stderr; compiles to a no-op unless DEBUG_KVM is defined.
 * NOTE(review): the name 'dprintf' shadows POSIX dprintf(3) -- presumably
 * predates its wide availability; a rename (e.g. DPRINTF) would need all
 * call sites updated in the same change. */
#define dprintf(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define dprintf(fmt, ...) \
    do { } while (0)
#endif
47 | d76d1650 | aurel32 | |
/* sysfs/procfs root under which per-CPU device-tree nodes live on ppc hosts */
#define PROC_DEVTREE_CPU      "/proc/device-tree/cpus/"

/* No KVM capabilities are strictly required on PPC beyond the generic set;
 * the sentinel terminates the list. */
const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_LAST_INFO
};

/* Host KVM capability flags, probed once in kvm_arch_init(). */
static int cap_interrupt_unset = false; /* KVM_CAP_PPC_UNSET_IRQ            */
static int cap_interrupt_level = false; /* KVM_CAP_PPC_IRQ_LEVEL            */
static int cap_segstate;                /* KVM_CAP_PPC_SEGSTATE (SREGS/PVR) */
static int cap_booke_sregs;             /* KVM_CAP_PPC_BOOKE_SREGS          */
static int cap_ppc_smt;                 /* KVM_CAP_PPC_SMT                  */
static int cap_ppc_rma;                 /* KVM_CAP_PPC_RMA                  */
static int cap_spapr_tce;               /* KVM_CAP_SPAPR_TCE                */

/* XXX We have a race condition where we actually have a level triggered
 *     interrupt, but the infrastructure can't expose that yet, so the guest
 *     takes but ignores it, goes to sleep and never gets notified that there's
 *     still an interrupt pending.
 *
 *     As a quick workaround, let's just wake up again 20 ms after we injected
 *     an interrupt. That way we can assure that we're always reinjecting
 *     interrupts in case the guest swallowed them.
 */
static QEMUTimer *idle_timer;
72 | c6a94ba5 | Alexander Graf | |
/* Timer callback for idle_timer: kick the vCPU out of its halt so any
 * swallowed level-triggered interrupt gets re-delivered (see XXX above). */
static void kvm_kick_env(void *env)
{
    qemu_cpu_kick(env);
}
77 | c6a94ba5 | Alexander Graf | |
/* One-time architecture init: probe the host kernel's PPC KVM capabilities
 * and cache them in the file-scope cap_* flags.  Always succeeds; a missing
 * level-irq capability is only warned about (the VM may stall at times). */
int kvm_arch_init(KVMState *s)
{
    cap_interrupt_unset = kvm_check_extension(s, KVM_CAP_PPC_UNSET_IRQ);
    cap_interrupt_level = kvm_check_extension(s, KVM_CAP_PPC_IRQ_LEVEL);
    cap_segstate = kvm_check_extension(s, KVM_CAP_PPC_SEGSTATE);
    cap_booke_sregs = kvm_check_extension(s, KVM_CAP_PPC_BOOKE_SREGS);
    cap_ppc_smt = kvm_check_extension(s, KVM_CAP_PPC_SMT);
    cap_ppc_rma = kvm_check_extension(s, KVM_CAP_PPC_RMA);
    cap_spapr_tce = kvm_check_extension(s, KVM_CAP_SPAPR_TCE);

    if (!cap_interrupt_level) {
        fprintf(stderr, "KVM: Couldn't find level irq capability. Expect the "
                        "VM to stall at times!\n");
    }

    return 0;
}
95 | d76d1650 | aurel32 | |
/* Push the guest PVR into KVM's sregs so the kernel models the right CPU.
 * On BookE this is skipped entirely (the host's native PVR is used); on
 * Book3S it requires KVM_CAP_PPC_SEGSTATE.  Returns 0 or a negative errno
 * from the ioctls. */
static int kvm_arch_sync_sregs(CPUState *cenv)
{
    struct kvm_sregs sregs;
    int ret;

    if (cenv->excp_model == POWERPC_EXCP_BOOKE) {
        /* What we're really trying to say is "if we're on BookE, we use
           the native PVR for now". This is the only sane way to check
           it though, so we potentially confuse users that they can run
           BookE guests on BookS. Let's hope nobody dares enough :) */
        return 0;
    } else {
        if (!cap_segstate) {
            fprintf(stderr, "kvm error: missing PVR setting capability\n");
            return -ENOSYS;
        }
    }

    /* Read-modify-write so every sregs field other than pvr is preserved. */
    ret = kvm_vcpu_ioctl(cenv, KVM_GET_SREGS, &sregs);
    if (ret) {
        return ret;
    }

    sregs.pvr = cenv->spr[SPR_PVR];
    return kvm_vcpu_ioctl(cenv, KVM_SET_SREGS, &sregs);
}
122 | 5666ca4a | Scott Wood | |
/* Set up a shared TLB array with KVM.
 *
 * Describes QEMU's BookE 2.06 MAS-format TLB (env->tlb.tlbm) to the kernel
 * via KVM_ENABLE_CAP(KVM_CAP_SW_TLB) so both sides operate on the same
 * in-memory array.  Returns 0 when the capability is absent (soft no-op)
 * or on success; a negative errno if enabling the capability fails. */
static int kvm_booke206_tlb_init(CPUState *env)
{
    struct kvm_book3e_206_tlb_params params = {};
    struct kvm_config_tlb cfg = {};
    struct kvm_enable_cap encap = {};
    unsigned int entries = 0;
    int ret, i;

    if (!kvm_enabled() ||
        !kvm_check_extension(env->kvm_state, KVM_CAP_SW_TLB)) {
        return 0;
    }

    assert(ARRAY_SIZE(params.tlb_sizes) == BOOKE206_MAX_TLBN);

    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        params.tlb_sizes[i] = booke206_tlb_size(env, i);
        params.tlb_ways[i] = booke206_tlb_ways(env, i);
        entries += params.tlb_sizes[i];
    }

    /* The shared-array layout only works if QEMU's ppcmas_tlb_t is
     * bit-compatible with the kernel's kvm_book3e_206_tlb_entry. */
    assert(entries == env->nb_tlb);
    assert(sizeof(struct kvm_book3e_206_tlb_entry) == sizeof(ppcmas_tlb_t));

    env->tlb_dirty = true;

    cfg.array = (uintptr_t)env->tlb.tlbm;
    cfg.array_len = sizeof(ppcmas_tlb_t) * entries;
    cfg.params = (uintptr_t)&params;
    cfg.mmu_type = KVM_MMU_FSL_BOOKE_NOHV;

    encap.cap = KVM_CAP_SW_TLB;
    encap.args[0] = (uintptr_t)&cfg;

    ret = kvm_vcpu_ioctl(env, KVM_ENABLE_CAP, &encap);
    if (ret < 0) {
        fprintf(stderr, "%s: couldn't enable KVM_CAP_SW_TLB: %s\n",
                __func__, strerror(-ret));
        return ret;
    }

    env->kvm_sw_tlb = true;
    return 0;
}
168 | 93dd5e85 | Scott Wood | |
/* Per-vCPU init: sync the PVR into KVM, arm the idle-workaround timer, and
 * hook up the shared software TLB on MMU models that support it. */
int kvm_arch_init_vcpu(CPUState *cenv)
{
    int ret;

    ret = kvm_arch_sync_sregs(cenv);
    if (ret) {
        return ret;
    }

    idle_timer = qemu_new_timer_ns(vm_clock, kvm_kick_env, cenv);

    /* Some targets support access to KVM's guest TLB. */
    switch (cenv->mmu_model) {
    case POWERPC_MMU_BOOKE206:
        ret = kvm_booke206_tlb_init(cenv);
        break;
    default:
        /* ret is still 0 from the successful sregs sync above */
        break;
    }

    return ret;
}
191 | d76d1650 | aurel32 | |
/* No PPC-specific state needs resetting beyond what generic code handles. */
void kvm_arch_reset_vcpu(CPUState *env)
{
}
195 | caa5af0f | Jan Kiszka | |
/* Flush QEMU's view of the shared software TLB back into KVM by marking
 * every entry dirty via KVM_DIRTY_TLB.  No-op unless the vCPU was set up
 * with the shared-TLB capability (env->kvm_sw_tlb). */
static void kvm_sw_tlb_put(CPUState *env)
{
    struct kvm_dirty_tlb dirty_tlb;
    unsigned char *bitmap;
    int ret;

    if (!env->kvm_sw_tlb) {
        return;
    }

    /* One bit per TLB entry, all set: declare the entire array dirty. */
    bitmap = g_malloc((env->nb_tlb + 7) / 8);
    memset(bitmap, 0xFF, (env->nb_tlb + 7) / 8);

    dirty_tlb.bitmap = (uintptr_t)bitmap;
    dirty_tlb.num_dirty = env->nb_tlb;

    ret = kvm_vcpu_ioctl(env, KVM_DIRTY_TLB, &dirty_tlb);
    if (ret) {
        /* Best effort: report but don't propagate (void return). */
        fprintf(stderr, "%s: KVM_DIRTY_TLB: %s\n",
                __func__, strerror(-ret));
    }

    g_free(bitmap);
}
220 | 93dd5e85 | Scott Wood | |
/* Write QEMU's CPU state into the kernel: general-purpose/special registers
 * via KVM_SET_REGS, then any pending software-TLB changes.  Reads the
 * current regs first so fields QEMU doesn't track are preserved.
 * Returns 0 on success or a negative errno from the ioctls. */
int kvm_arch_put_registers(CPUState *env, int level)
{
    struct kvm_regs regs;
    int ret;
    int i;

    /* Read-modify-write: keep kernel-side fields we don't shadow. */
    ret = kvm_vcpu_ioctl(env, KVM_GET_REGS, &regs);
    if (ret < 0)
        return ret;

    regs.ctr = env->ctr;
    regs.lr  = env->lr;
    regs.xer = env->xer;
    regs.msr = env->msr;
    regs.pc = env->nip;

    regs.srr0 = env->spr[SPR_SRR0];
    regs.srr1 = env->spr[SPR_SRR1];

    regs.sprg0 = env->spr[SPR_SPRG0];
    regs.sprg1 = env->spr[SPR_SPRG1];
    regs.sprg2 = env->spr[SPR_SPRG2];
    regs.sprg3 = env->spr[SPR_SPRG3];
    regs.sprg4 = env->spr[SPR_SPRG4];
    regs.sprg5 = env->spr[SPR_SPRG5];
    regs.sprg6 = env->spr[SPR_SPRG6];
    regs.sprg7 = env->spr[SPR_SPRG7];

    regs.pid = env->spr[SPR_BOOKE_PID];

    for (i = 0;i < 32; i++)
        regs.gpr[i] = env->gpr[i];

    ret = kvm_vcpu_ioctl(env, KVM_SET_REGS, &regs);
    if (ret < 0)
        return ret;

    /* Push shared-TLB updates only after the registers are in place. */
    if (env->tlb_dirty) {
        kvm_sw_tlb_put(env);
        env->tlb_dirty = false;
    }

    return ret;
}
265 | d76d1650 | aurel32 | |
/* Pull the vCPU state out of the kernel into QEMU's CPUState: always the
 * general registers (KVM_GET_REGS), then -- depending on which capability
 * the host advertises -- either the BookE sregs (feature-flag-guarded
 * sections) or the Book3S segment state (SDR1, SLB, SRs, BATs).
 * Returns 0 on success or a negative errno from the ioctls. */
int kvm_arch_get_registers(CPUState *env)
{
    struct kvm_regs regs;
    struct kvm_sregs sregs;
    uint32_t cr;
    int i, ret;

    ret = kvm_vcpu_ioctl(env, KVM_GET_REGS, &regs);
    if (ret < 0)
        return ret;

    /* Unpack the packed 32-bit CR into the eight 4-bit crf fields. */
    cr = regs.cr;
    for (i = 7; i >= 0; i--) {
        env->crf[i] = cr & 15;
        cr >>= 4;
    }

    env->ctr = regs.ctr;
    env->lr = regs.lr;
    env->xer = regs.xer;
    env->msr = regs.msr;
    env->nip = regs.pc;

    env->spr[SPR_SRR0] = regs.srr0;
    env->spr[SPR_SRR1] = regs.srr1;

    env->spr[SPR_SPRG0] = regs.sprg0;
    env->spr[SPR_SPRG1] = regs.sprg1;
    env->spr[SPR_SPRG2] = regs.sprg2;
    env->spr[SPR_SPRG3] = regs.sprg3;
    env->spr[SPR_SPRG4] = regs.sprg4;
    env->spr[SPR_SPRG5] = regs.sprg5;
    env->spr[SPR_SPRG6] = regs.sprg6;
    env->spr[SPR_SPRG7] = regs.sprg7;

    env->spr[SPR_BOOKE_PID] = regs.pid;

    for (i = 0;i < 32; i++)
        env->gpr[i] = regs.gpr[i];

    if (cap_booke_sregs) {
        ret = kvm_vcpu_ioctl(env, KVM_GET_SREGS, &sregs);
        if (ret < 0) {
            return ret;
        }

        /* Each KVM_SREGS_E_* feature bit gates a group of sregs fields;
         * only copy the groups the kernel says are valid. */
        if (sregs.u.e.features & KVM_SREGS_E_BASE) {
            env->spr[SPR_BOOKE_CSRR0] = sregs.u.e.csrr0;
            env->spr[SPR_BOOKE_CSRR1] = sregs.u.e.csrr1;
            env->spr[SPR_BOOKE_ESR] = sregs.u.e.esr;
            env->spr[SPR_BOOKE_DEAR] = sregs.u.e.dear;
            env->spr[SPR_BOOKE_MCSR] = sregs.u.e.mcsr;
            env->spr[SPR_BOOKE_TSR] = sregs.u.e.tsr;
            env->spr[SPR_BOOKE_TCR] = sregs.u.e.tcr;
            env->spr[SPR_DECR] = sregs.u.e.dec;
            /* 64-bit timebase is split across the TBL/TBU register pair */
            env->spr[SPR_TBL] = sregs.u.e.tb & 0xffffffff;
            env->spr[SPR_TBU] = sregs.u.e.tb >> 32;
            env->spr[SPR_VRSAVE] = sregs.u.e.vrsave;
        }

        if (sregs.u.e.features & KVM_SREGS_E_ARCH206) {
            env->spr[SPR_BOOKE_PIR] = sregs.u.e.pir;
            env->spr[SPR_BOOKE_MCSRR0] = sregs.u.e.mcsrr0;
            env->spr[SPR_BOOKE_MCSRR1] = sregs.u.e.mcsrr1;
            env->spr[SPR_BOOKE_DECAR] = sregs.u.e.decar;
            env->spr[SPR_BOOKE_IVPR] = sregs.u.e.ivpr;
        }

        if (sregs.u.e.features & KVM_SREGS_E_64) {
            env->spr[SPR_BOOKE_EPCR] = sregs.u.e.epcr;
        }

        if (sregs.u.e.features & KVM_SREGS_E_SPRG8) {
            env->spr[SPR_BOOKE_SPRG8] = sregs.u.e.sprg8;
        }

        if (sregs.u.e.features & KVM_SREGS_E_IVOR) {
            env->spr[SPR_BOOKE_IVOR0] = sregs.u.e.ivor_low[0];
            env->spr[SPR_BOOKE_IVOR1] = sregs.u.e.ivor_low[1];
            env->spr[SPR_BOOKE_IVOR2] = sregs.u.e.ivor_low[2];
            env->spr[SPR_BOOKE_IVOR3] = sregs.u.e.ivor_low[3];
            env->spr[SPR_BOOKE_IVOR4] = sregs.u.e.ivor_low[4];
            env->spr[SPR_BOOKE_IVOR5] = sregs.u.e.ivor_low[5];
            env->spr[SPR_BOOKE_IVOR6] = sregs.u.e.ivor_low[6];
            env->spr[SPR_BOOKE_IVOR7] = sregs.u.e.ivor_low[7];
            env->spr[SPR_BOOKE_IVOR8] = sregs.u.e.ivor_low[8];
            env->spr[SPR_BOOKE_IVOR9] = sregs.u.e.ivor_low[9];
            env->spr[SPR_BOOKE_IVOR10] = sregs.u.e.ivor_low[10];
            env->spr[SPR_BOOKE_IVOR11] = sregs.u.e.ivor_low[11];
            env->spr[SPR_BOOKE_IVOR12] = sregs.u.e.ivor_low[12];
            env->spr[SPR_BOOKE_IVOR13] = sregs.u.e.ivor_low[13];
            env->spr[SPR_BOOKE_IVOR14] = sregs.u.e.ivor_low[14];
            env->spr[SPR_BOOKE_IVOR15] = sregs.u.e.ivor_low[15];

            if (sregs.u.e.features & KVM_SREGS_E_SPE) {
                env->spr[SPR_BOOKE_IVOR32] = sregs.u.e.ivor_high[0];
                env->spr[SPR_BOOKE_IVOR33] = sregs.u.e.ivor_high[1];
                env->spr[SPR_BOOKE_IVOR34] = sregs.u.e.ivor_high[2];
            }

            if (sregs.u.e.features & KVM_SREGS_E_PM) {
                env->spr[SPR_BOOKE_IVOR35] = sregs.u.e.ivor_high[3];
            }

            if (sregs.u.e.features & KVM_SREGS_E_PC) {
                env->spr[SPR_BOOKE_IVOR36] = sregs.u.e.ivor_high[4];
                env->spr[SPR_BOOKE_IVOR37] = sregs.u.e.ivor_high[5];
            }
        }

        if (sregs.u.e.features & KVM_SREGS_E_ARCH206_MMU) {
            env->spr[SPR_BOOKE_MAS0] = sregs.u.e.mas0;
            env->spr[SPR_BOOKE_MAS1] = sregs.u.e.mas1;
            env->spr[SPR_BOOKE_MAS2] = sregs.u.e.mas2;
            /* mas7_3 packs MAS7 (high word) and MAS3 (low word) */
            env->spr[SPR_BOOKE_MAS3] = sregs.u.e.mas7_3 & 0xffffffff;
            env->spr[SPR_BOOKE_MAS4] = sregs.u.e.mas4;
            env->spr[SPR_BOOKE_MAS6] = sregs.u.e.mas6;
            env->spr[SPR_BOOKE_MAS7] = sregs.u.e.mas7_3 >> 32;
            env->spr[SPR_MMUCFG] = sregs.u.e.mmucfg;
            env->spr[SPR_BOOKE_TLB0CFG] = sregs.u.e.tlbcfg[0];
            env->spr[SPR_BOOKE_TLB1CFG] = sregs.u.e.tlbcfg[1];
        }

        if (sregs.u.e.features & KVM_SREGS_EXP) {
            env->spr[SPR_BOOKE_EPR] = sregs.u.e.epr;
        }

        if (sregs.u.e.features & KVM_SREGS_E_PD) {
            env->spr[SPR_BOOKE_EPLC] = sregs.u.e.eplc;
            env->spr[SPR_BOOKE_EPSC] = sregs.u.e.epsc;
        }

        if (sregs.u.e.impl_id == KVM_SREGS_E_IMPL_FSL) {
            env->spr[SPR_E500_SVR] = sregs.u.e.impl.fsl.svr;
            env->spr[SPR_Exxx_MCAR] = sregs.u.e.impl.fsl.mcar;
            env->spr[SPR_HID0] = sregs.u.e.impl.fsl.hid0;

            if (sregs.u.e.impl.fsl.features & KVM_SREGS_E_FSL_PIDn) {
                env->spr[SPR_BOOKE_PID1] = sregs.u.e.impl.fsl.pid1;
                env->spr[SPR_BOOKE_PID2] = sregs.u.e.impl.fsl.pid2;
            }
        }
    }

    if (cap_segstate) {
        ret = kvm_vcpu_ioctl(env, KVM_GET_SREGS, &sregs);
        if (ret < 0) {
            return ret;
        }

        ppc_store_sdr1(env, sregs.u.s.sdr1);

        /* Sync SLB */
#ifdef TARGET_PPC64
        for (i = 0; i < 64; i++) {
            ppc_store_slb(env, sregs.u.s.ppc64.slb[i].slbe,
                               sregs.u.s.ppc64.slb[i].slbv);
        }
#endif

        /* Sync SRs */
        for (i = 0; i < 16; i++) {
            env->sr[i] = sregs.u.s.ppc32.sr[i];
        }

        /* Sync BATs -- each 64-bit kernel value splits into lower/upper
         * halves of QEMU's DBAT/IBAT pairs. */
        for (i = 0; i < 8; i++) {
            env->DBAT[0][i] = sregs.u.s.ppc32.dbat[i] & 0xffffffff;
            env->DBAT[1][i] = sregs.u.s.ppc32.dbat[i] >> 32;
            env->IBAT[0][i] = sregs.u.s.ppc32.ibat[i] & 0xffffffff;
            env->IBAT[1][i] = sregs.u.s.ppc32.ibat[i] >> 32;
        }
    }

    return 0;
}
442 | d76d1650 | aurel32 | |
/* Raise or lower the external interrupt line for a vCPU via KVM_INTERRUPT.
 * Only PPC_INTERRUPT_EXT is handled; other irq sources and hosts without
 * level-irq support are silently ignored (returns 0 either way). */
int kvmppc_set_interrupt(CPUState *env, int irq, int level)
{
    unsigned virq = level ? KVM_INTERRUPT_SET_LEVEL : KVM_INTERRUPT_UNSET;

    if (irq != PPC_INTERRUPT_EXT) {
        return 0;
    }

    if (!kvm_enabled() || !cap_interrupt_unset || !cap_interrupt_level) {
        return 0;
    }

    /* NOTE(review): ioctl return value is discarded -- presumably best
     * effort by design; verify against callers before changing. */
    kvm_vcpu_ioctl(env, KVM_INTERRUPT, &virq);

    return 0;
}
459 | fc87e185 | Alexander Graf | |
/* Pick the external-interrupt input pin matching the compiled target
 * family (40x embedded, 970 for 64-bit, 6xx otherwise). */
#if defined(TARGET_PPCEMB)
#define PPC_INPUT_INT PPC40x_INPUT_INT
#elif defined(TARGET_PPC64)
#define PPC_INPUT_INT PPC970_INPUT_INT
#else
#define PPC_INPUT_INT PPC6xx_INPUT_INT
#endif
|
467 | 16415335 | Alexander Graf | |
/* Called just before entering the guest: on hosts without the level-irq
 * capability, manually inject a pending external interrupt and arm the
 * 20ms wakeup timer as the re-injection workaround (see idle_timer). */
void kvm_arch_pre_run(CPUState *env, struct kvm_run *run)
{
    int r;
    unsigned irq;

    /* PowerPC Qemu tracks the various core input pins (interrupt, critical
     * interrupt, reset, etc) in PPC-specific env->irq_input_state. */
    if (!cap_interrupt_level &&
        run->ready_for_interrupt_injection &&
        (env->interrupt_request & CPU_INTERRUPT_HARD) &&
        (env->irq_input_state & (1<<PPC_INPUT_INT)))
    {
        /* For now KVM disregards the 'irq' argument. However, in the
         * future KVM could cache it in-kernel to avoid a heavyweight exit
         * when reading the UIC.
         */
        irq = KVM_INTERRUPT_SET;

        dprintf("injected interrupt %d\n", irq);
        r = kvm_vcpu_ioctl(env, KVM_INTERRUPT, &irq);
        if (r < 0)
            printf("cpu %d fail inject %x\n", env->cpu_index, irq);

        /* Always wake up soon in case the interrupt was level based */
        qemu_mod_timer(idle_timer, qemu_get_clock_ns(vm_clock) +
                       (get_ticks_per_sec() / 50));
    }

    /* We don't know if there are more interrupts pending after this. However,
     * the guest will return to userspace in the course of handling this one
     * anyways, so we will get a chance to deliver the rest. */
}
500 | d76d1650 | aurel32 | |
/* Nothing to do after returning from the guest on PPC. */
void kvm_arch_post_run(CPUState *env, struct kvm_run *run)
{
}
504 | d76d1650 | aurel32 | |
/* No PPC-specific async events; 0 means "keep running the vCPU". */
int kvm_arch_process_async_events(CPUState *env)
{
    return 0;
}
509 | 0af691d7 | Marcelo Tosatti | |
/* Handle a KVM_EXIT_HLT: put the vCPU to sleep unless a hard interrupt is
 * already pending or external interrupts are disabled (msr_ee clear).
 * Always returns 0 so the run loop continues. */
static int kvmppc_handle_halt(CPUState *env)
{
    if (!(env->interrupt_request & CPU_INTERRUPT_HARD) && (msr_ee)) {
        env->halted = 1;
        env->exception_index = EXCP_HLT;
    }

    return 0;
}
519 | d76d1650 | aurel32 | |
520 | d76d1650 | aurel32 | /* map dcr access to existing qemu dcr emulation */
|
521 | d76d1650 | aurel32 | static int kvmppc_handle_dcr_read(CPUState *env, uint32_t dcrn, uint32_t *data) |
522 | d76d1650 | aurel32 | { |
523 | d76d1650 | aurel32 | if (ppc_dcr_read(env->dcr_env, dcrn, data) < 0) |
524 | d76d1650 | aurel32 | fprintf(stderr, "Read to unhandled DCR (0x%x)\n", dcrn);
|
525 | d76d1650 | aurel32 | |
526 | bb4ea393 | Jan Kiszka | return 0; |
527 | d76d1650 | aurel32 | } |
528 | d76d1650 | aurel32 | |
529 | d76d1650 | aurel32 | static int kvmppc_handle_dcr_write(CPUState *env, uint32_t dcrn, uint32_t data) |
530 | d76d1650 | aurel32 | { |
531 | d76d1650 | aurel32 | if (ppc_dcr_write(env->dcr_env, dcrn, data) < 0) |
532 | d76d1650 | aurel32 | fprintf(stderr, "Write to unhandled DCR (0x%x)\n", dcrn);
|
533 | d76d1650 | aurel32 | |
534 | bb4ea393 | Jan Kiszka | return 0; |
535 | d76d1650 | aurel32 | } |
536 | d76d1650 | aurel32 | |
/* Dispatch a KVM exit reason to its PPC handler.  Returns 0 to resume the
 * guest, 1 to drop back to the main loop (PAPR hypercall handled), or -1
 * on an unknown exit reason. */
int kvm_arch_handle_exit(CPUState *env, struct kvm_run *run)
{
    int ret;

    switch (run->exit_reason) {
    case KVM_EXIT_DCR:
        if (run->dcr.is_write) {
            dprintf("handle dcr write\n");
            ret = kvmppc_handle_dcr_write(env, run->dcr.dcrn, run->dcr.data);
        } else {
            dprintf("handle dcr read\n");
            ret = kvmppc_handle_dcr_read(env, run->dcr.dcrn, &run->dcr.data);
        }
        break;
    case KVM_EXIT_HLT:
        dprintf("handle halt\n");
        ret = kvmppc_handle_halt(env);
        break;
#ifdef CONFIG_PSERIES
    case KVM_EXIT_PAPR_HCALL:
        dprintf("handle PAPR hypercall\n");
        /* Result is passed back to the guest through the run structure. */
        run->papr_hcall.ret = spapr_hypercall(env, run->papr_hcall.nr,
                                              run->papr_hcall.args);
        ret = 1;
        break;
#endif
    default:
        fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
        ret = -1;
        break;
    }

    return ret;
}
571 | d76d1650 | aurel32 | |
/*
 * Scan /proc/cpuinfo for a line beginning with @field and copy that
 * whole line (including the trailing newline, truncated to @len bytes,
 * always NUL-terminated) into @value.
 *
 * Returns 0 on success, -1 if the file cannot be opened or the field
 * is not present.
 */
static int read_cpuinfo(const char *field, char *value, int len)
{
    FILE *f;
    int ret = -1;
    int field_len = strlen(field);
    char line[512];

    f = fopen("/proc/cpuinfo", "r");
    if (!f) {
        return -1;
    }

    do {
        if (!fgets(line, sizeof(line), f)) {
            break;
        }
        if (!strncmp(line, field, field_len)) {
            /* strncpy() does not NUL-terminate when the source is at
             * least @len bytes long; terminate explicitly so callers
             * can safely run string functions on @value. */
            strncpy(value, line, len);
            if (len > 0) {
                value[len - 1] = '\0';
            }
            ret = 0;
            break;
        }
    } while (*line);

    fclose(f);

    return ret;
}
599 | dc333cd6 | Alexander Graf | |
/*
 * Return the host timebase frequency in Hz as reported by the
 * "timebase" line of /proc/cpuinfo.  Falls back to QEMU's tick rate
 * when the line is missing or malformed.
 */
uint32_t kvmppc_get_tbfreq(void)
{
    char line[512];
    char *ns;
    char *end;
    long freq;
    uint32_t retval = get_ticks_per_sec();

    if (read_cpuinfo("timebase", line, sizeof(line))) {
        return retval;
    }

    ns = strchr(line, ':');
    if (!ns) {
        return retval;
    }
    ns++;

    /* strtol() (unlike atoi()) lets us detect a line with no digits
     * after the colon, so a malformed entry keeps the fallback value
     * instead of silently becoming 0. */
    freq = strtol(ns, &end, 10);
    if (end == ns) {
        return retval;
    }

    return freq;
}
619 | 4513d923 | Gleb Natapov | |
620 | eadaada1 | Alexander Graf | /* Try to find a device tree node for a CPU with clock-frequency property */
|
621 | eadaada1 | Alexander Graf | static int kvmppc_find_cpu_dt(char *buf, int buf_len) |
622 | eadaada1 | Alexander Graf | { |
623 | eadaada1 | Alexander Graf | struct dirent *dirp;
|
624 | eadaada1 | Alexander Graf | DIR *dp; |
625 | eadaada1 | Alexander Graf | |
626 | eadaada1 | Alexander Graf | if ((dp = opendir(PROC_DEVTREE_CPU)) == NULL) { |
627 | eadaada1 | Alexander Graf | printf("Can't open directory " PROC_DEVTREE_CPU "\n"); |
628 | eadaada1 | Alexander Graf | return -1; |
629 | eadaada1 | Alexander Graf | } |
630 | eadaada1 | Alexander Graf | |
631 | eadaada1 | Alexander Graf | buf[0] = '\0'; |
632 | eadaada1 | Alexander Graf | while ((dirp = readdir(dp)) != NULL) { |
633 | eadaada1 | Alexander Graf | FILE *f; |
634 | eadaada1 | Alexander Graf | snprintf(buf, buf_len, "%s%s/clock-frequency", PROC_DEVTREE_CPU,
|
635 | eadaada1 | Alexander Graf | dirp->d_name); |
636 | eadaada1 | Alexander Graf | f = fopen(buf, "r");
|
637 | eadaada1 | Alexander Graf | if (f) {
|
638 | eadaada1 | Alexander Graf | snprintf(buf, buf_len, "%s%s", PROC_DEVTREE_CPU, dirp->d_name);
|
639 | eadaada1 | Alexander Graf | fclose(f); |
640 | eadaada1 | Alexander Graf | break;
|
641 | eadaada1 | Alexander Graf | } |
642 | eadaada1 | Alexander Graf | buf[0] = '\0'; |
643 | eadaada1 | Alexander Graf | } |
644 | eadaada1 | Alexander Graf | closedir(dp); |
645 | eadaada1 | Alexander Graf | if (buf[0] == '\0') { |
646 | eadaada1 | Alexander Graf | printf("Unknown host!\n");
|
647 | eadaada1 | Alexander Graf | return -1; |
648 | eadaada1 | Alexander Graf | } |
649 | eadaada1 | Alexander Graf | |
650 | eadaada1 | Alexander Graf | return 0; |
651 | eadaada1 | Alexander Graf | } |
652 | eadaada1 | Alexander Graf | |
653 | 9bc884b7 | David Gibson | /* Read a CPU node property from the host device tree that's a single
|
654 | 9bc884b7 | David Gibson | * integer (32-bit or 64-bit). Returns 0 if anything goes wrong
|
655 | 9bc884b7 | David Gibson | * (can't find or open the property, or doesn't understand the
|
656 | 9bc884b7 | David Gibson | * format) */
|
657 | 9bc884b7 | David Gibson | static uint64_t kvmppc_read_int_cpu_dt(const char *propname) |
658 | eadaada1 | Alexander Graf | { |
659 | 9bc884b7 | David Gibson | char buf[PATH_MAX];
|
660 | 9bc884b7 | David Gibson | union {
|
661 | 9bc884b7 | David Gibson | uint32_t v32; |
662 | 9bc884b7 | David Gibson | uint64_t v64; |
663 | 9bc884b7 | David Gibson | } u; |
664 | eadaada1 | Alexander Graf | FILE *f; |
665 | eadaada1 | Alexander Graf | int len;
|
666 | eadaada1 | Alexander Graf | |
667 | eadaada1 | Alexander Graf | if (kvmppc_find_cpu_dt(buf, sizeof(buf))) { |
668 | 9bc884b7 | David Gibson | return -1; |
669 | eadaada1 | Alexander Graf | } |
670 | eadaada1 | Alexander Graf | |
671 | 9bc884b7 | David Gibson | strncat(buf, "/", sizeof(buf) - strlen(buf)); |
672 | 9bc884b7 | David Gibson | strncat(buf, propname, sizeof(buf) - strlen(buf));
|
673 | eadaada1 | Alexander Graf | |
674 | eadaada1 | Alexander Graf | f = fopen(buf, "rb");
|
675 | eadaada1 | Alexander Graf | if (!f) {
|
676 | eadaada1 | Alexander Graf | return -1; |
677 | eadaada1 | Alexander Graf | } |
678 | eadaada1 | Alexander Graf | |
679 | 9bc884b7 | David Gibson | len = fread(&u, 1, sizeof(u), f); |
680 | eadaada1 | Alexander Graf | fclose(f); |
681 | eadaada1 | Alexander Graf | switch (len) {
|
682 | 9bc884b7 | David Gibson | case 4: |
683 | 9bc884b7 | David Gibson | /* property is a 32-bit quantity */
|
684 | 9bc884b7 | David Gibson | return be32_to_cpu(u.v32);
|
685 | 9bc884b7 | David Gibson | case 8: |
686 | 9bc884b7 | David Gibson | return be64_to_cpu(u.v64);
|
687 | eadaada1 | Alexander Graf | } |
688 | eadaada1 | Alexander Graf | |
689 | eadaada1 | Alexander Graf | return 0; |
690 | eadaada1 | Alexander Graf | } |
691 | eadaada1 | Alexander Graf | |
/* Host CPU clock frequency in Hz, read from the device tree. */
uint64_t kvmppc_get_clockfreq(void)
{
    uint64_t freq = kvmppc_read_int_cpu_dt("clock-frequency");
    return freq;
}
696 | 9bc884b7 | David Gibson | |
/* Host "ibm,vmx" device tree property (VMX/VSX capability level). */
uint32_t kvmppc_get_vmx(void)
{
    uint32_t vmx = kvmppc_read_int_cpu_dt("ibm,vmx");
    return vmx;
}
701 | 6659394f | David Gibson | |
/* Host "ibm,dfp" device tree property (decimal FP capability). */
uint32_t kvmppc_get_dfp(void)
{
    uint32_t dfp = kvmppc_read_int_cpu_dt("ibm,dfp");
    return dfp;
}
706 | 6659394f | David Gibson | |
707 | 45024f09 | Alexander Graf | int kvmppc_get_hypercall(CPUState *env, uint8_t *buf, int buf_len) |
708 | 45024f09 | Alexander Graf | { |
709 | 45024f09 | Alexander Graf | uint32_t *hc = (uint32_t*)buf; |
710 | 45024f09 | Alexander Graf | |
711 | 45024f09 | Alexander Graf | struct kvm_ppc_pvinfo pvinfo;
|
712 | 45024f09 | Alexander Graf | |
713 | 45024f09 | Alexander Graf | if (kvm_check_extension(env->kvm_state, KVM_CAP_PPC_GET_PVINFO) &&
|
714 | 45024f09 | Alexander Graf | !kvm_vm_ioctl(env->kvm_state, KVM_PPC_GET_PVINFO, &pvinfo)) { |
715 | 45024f09 | Alexander Graf | memcpy(buf, pvinfo.hcall, buf_len); |
716 | 45024f09 | Alexander Graf | |
717 | 45024f09 | Alexander Graf | return 0; |
718 | 45024f09 | Alexander Graf | } |
719 | 45024f09 | Alexander Graf | |
720 | 45024f09 | Alexander Graf | /*
|
721 | 45024f09 | Alexander Graf | * Fallback to always fail hypercalls:
|
722 | 45024f09 | Alexander Graf | *
|
723 | 45024f09 | Alexander Graf | * li r3, -1
|
724 | 45024f09 | Alexander Graf | * nop
|
725 | 45024f09 | Alexander Graf | * nop
|
726 | 45024f09 | Alexander Graf | * nop
|
727 | 45024f09 | Alexander Graf | */
|
728 | 45024f09 | Alexander Graf | |
729 | 45024f09 | Alexander Graf | hc[0] = 0x3860ffff; |
730 | 45024f09 | Alexander Graf | hc[1] = 0x60000000; |
731 | 45024f09 | Alexander Graf | hc[2] = 0x60000000; |
732 | 45024f09 | Alexander Graf | hc[3] = 0x60000000; |
733 | 45024f09 | Alexander Graf | |
734 | 45024f09 | Alexander Graf | return 0; |
735 | 45024f09 | Alexander Graf | } |
736 | 45024f09 | Alexander Graf | |
737 | f61b4bed | Alexander Graf | void kvmppc_set_papr(CPUState *env)
|
738 | f61b4bed | Alexander Graf | { |
739 | 94135e81 | Alexander Graf | struct kvm_enable_cap cap = {};
|
740 | 94135e81 | Alexander Graf | struct kvm_one_reg reg = {};
|
741 | 94135e81 | Alexander Graf | struct kvm_sregs sregs = {};
|
742 | f61b4bed | Alexander Graf | int ret;
|
743 | f61b4bed | Alexander Graf | |
744 | f61b4bed | Alexander Graf | cap.cap = KVM_CAP_PPC_PAPR; |
745 | f61b4bed | Alexander Graf | ret = kvm_vcpu_ioctl(env, KVM_ENABLE_CAP, &cap); |
746 | f61b4bed | Alexander Graf | |
747 | f61b4bed | Alexander Graf | if (ret) {
|
748 | f61b4bed | Alexander Graf | goto fail;
|
749 | f61b4bed | Alexander Graf | } |
750 | f61b4bed | Alexander Graf | |
751 | f61b4bed | Alexander Graf | /*
|
752 | f61b4bed | Alexander Graf | * XXX We set HIOR here. It really should be a qdev property of
|
753 | f61b4bed | Alexander Graf | * the CPU node, but we don't have CPUs converted to qdev yet.
|
754 | f61b4bed | Alexander Graf | *
|
755 | f61b4bed | Alexander Graf | * Once we have qdev CPUs, move HIOR to a qdev property and
|
756 | f61b4bed | Alexander Graf | * remove this chunk.
|
757 | f61b4bed | Alexander Graf | */
|
758 | 94135e81 | Alexander Graf | reg.id = KVM_ONE_REG_PPC_HIOR; |
759 | 94135e81 | Alexander Graf | reg.u.reg64 = env->spr[SPR_HIOR]; |
760 | 94135e81 | Alexander Graf | ret = kvm_vcpu_ioctl(env, KVM_SET_ONE_REG, ®); |
761 | 94135e81 | Alexander Graf | if (ret) {
|
762 | 94135e81 | Alexander Graf | goto fail;
|
763 | 94135e81 | Alexander Graf | } |
764 | 94135e81 | Alexander Graf | |
765 | 94135e81 | Alexander Graf | /* Set SDR1 so kernel space finds the HTAB */
|
766 | 94135e81 | Alexander Graf | ret = kvm_vcpu_ioctl(env, KVM_GET_SREGS, &sregs); |
767 | 94135e81 | Alexander Graf | if (ret) {
|
768 | 94135e81 | Alexander Graf | goto fail;
|
769 | 94135e81 | Alexander Graf | } |
770 | 94135e81 | Alexander Graf | |
771 | 94135e81 | Alexander Graf | sregs.u.s.sdr1 = env->spr[SPR_SDR1]; |
772 | 94135e81 | Alexander Graf | |
773 | 94135e81 | Alexander Graf | ret = kvm_vcpu_ioctl(env, KVM_SET_SREGS, &sregs); |
774 | 94135e81 | Alexander Graf | if (ret) {
|
775 | 94135e81 | Alexander Graf | goto fail;
|
776 | 94135e81 | Alexander Graf | } |
777 | f61b4bed | Alexander Graf | |
778 | f61b4bed | Alexander Graf | return;
|
779 | f61b4bed | Alexander Graf | |
780 | f61b4bed | Alexander Graf | fail:
|
781 | f61b4bed | Alexander Graf | cpu_abort(env, "This KVM version does not support PAPR\n");
|
782 | f61b4bed | Alexander Graf | } |
783 | f61b4bed | Alexander Graf | |
784 | e97c3636 | David Gibson | int kvmppc_smt_threads(void) |
785 | e97c3636 | David Gibson | { |
786 | e97c3636 | David Gibson | return cap_ppc_smt ? cap_ppc_smt : 1; |
787 | e97c3636 | David Gibson | } |
788 | e97c3636 | David Gibson | |
789 | 354ac20a | David Gibson | off_t kvmppc_alloc_rma(const char *name, MemoryRegion *sysmem) |
790 | 354ac20a | David Gibson | { |
791 | 354ac20a | David Gibson | void *rma;
|
792 | 354ac20a | David Gibson | off_t size; |
793 | 354ac20a | David Gibson | int fd;
|
794 | 354ac20a | David Gibson | struct kvm_allocate_rma ret;
|
795 | 354ac20a | David Gibson | MemoryRegion *rma_region; |
796 | 354ac20a | David Gibson | |
797 | 354ac20a | David Gibson | /* If cap_ppc_rma == 0, contiguous RMA allocation is not supported
|
798 | 354ac20a | David Gibson | * if cap_ppc_rma == 1, contiguous RMA allocation is supported, but
|
799 | 354ac20a | David Gibson | * not necessary on this hardware
|
800 | 354ac20a | David Gibson | * if cap_ppc_rma == 2, contiguous RMA allocation is needed on this hardware
|
801 | 354ac20a | David Gibson | *
|
802 | 354ac20a | David Gibson | * FIXME: We should allow the user to force contiguous RMA
|
803 | 354ac20a | David Gibson | * allocation in the cap_ppc_rma==1 case.
|
804 | 354ac20a | David Gibson | */
|
805 | 354ac20a | David Gibson | if (cap_ppc_rma < 2) { |
806 | 354ac20a | David Gibson | return 0; |
807 | 354ac20a | David Gibson | } |
808 | 354ac20a | David Gibson | |
809 | 354ac20a | David Gibson | fd = kvm_vm_ioctl(kvm_state, KVM_ALLOCATE_RMA, &ret); |
810 | 354ac20a | David Gibson | if (fd < 0) { |
811 | 354ac20a | David Gibson | fprintf(stderr, "KVM: Error on KVM_ALLOCATE_RMA: %s\n",
|
812 | 354ac20a | David Gibson | strerror(errno)); |
813 | 354ac20a | David Gibson | return -1; |
814 | 354ac20a | David Gibson | } |
815 | 354ac20a | David Gibson | |
816 | 354ac20a | David Gibson | size = MIN(ret.rma_size, 256ul << 20); |
817 | 354ac20a | David Gibson | |
818 | 354ac20a | David Gibson | rma = mmap(NULL, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0); |
819 | 354ac20a | David Gibson | if (rma == MAP_FAILED) {
|
820 | 354ac20a | David Gibson | fprintf(stderr, "KVM: Error mapping RMA: %s\n", strerror(errno));
|
821 | 354ac20a | David Gibson | return -1; |
822 | 354ac20a | David Gibson | }; |
823 | 354ac20a | David Gibson | |
824 | 354ac20a | David Gibson | rma_region = g_new(MemoryRegion, 1);
|
825 | 354ac20a | David Gibson | memory_region_init_ram_ptr(rma_region, NULL, name, size, rma);
|
826 | 354ac20a | David Gibson | memory_region_add_subregion(sysmem, 0, rma_region);
|
827 | 354ac20a | David Gibson | |
828 | 354ac20a | David Gibson | return size;
|
829 | 354ac20a | David Gibson | } |
830 | 354ac20a | David Gibson | |
831 | 0f5cb298 | David Gibson | void *kvmppc_create_spapr_tce(uint32_t liobn, uint32_t window_size, int *pfd) |
832 | 0f5cb298 | David Gibson | { |
833 | 0f5cb298 | David Gibson | struct kvm_create_spapr_tce args = {
|
834 | 0f5cb298 | David Gibson | .liobn = liobn, |
835 | 0f5cb298 | David Gibson | .window_size = window_size, |
836 | 0f5cb298 | David Gibson | }; |
837 | 0f5cb298 | David Gibson | long len;
|
838 | 0f5cb298 | David Gibson | int fd;
|
839 | 0f5cb298 | David Gibson | void *table;
|
840 | 0f5cb298 | David Gibson | |
841 | 0f5cb298 | David Gibson | if (!cap_spapr_tce) {
|
842 | 0f5cb298 | David Gibson | return NULL; |
843 | 0f5cb298 | David Gibson | } |
844 | 0f5cb298 | David Gibson | |
845 | 0f5cb298 | David Gibson | fd = kvm_vm_ioctl(kvm_state, KVM_CREATE_SPAPR_TCE, &args); |
846 | 0f5cb298 | David Gibson | if (fd < 0) { |
847 | 0f5cb298 | David Gibson | return NULL; |
848 | 0f5cb298 | David Gibson | } |
849 | 0f5cb298 | David Gibson | |
850 | 0f5cb298 | David Gibson | len = (window_size / SPAPR_VIO_TCE_PAGE_SIZE) * sizeof(VIOsPAPR_RTCE);
|
851 | 0f5cb298 | David Gibson | /* FIXME: round this up to page size */
|
852 | 0f5cb298 | David Gibson | |
853 | 0f5cb298 | David Gibson | table = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0); |
854 | 0f5cb298 | David Gibson | if (table == MAP_FAILED) {
|
855 | 0f5cb298 | David Gibson | close(fd); |
856 | 0f5cb298 | David Gibson | return NULL; |
857 | 0f5cb298 | David Gibson | } |
858 | 0f5cb298 | David Gibson | |
859 | 0f5cb298 | David Gibson | *pfd = fd; |
860 | 0f5cb298 | David Gibson | return table;
|
861 | 0f5cb298 | David Gibson | } |
862 | 0f5cb298 | David Gibson | |
863 | 0f5cb298 | David Gibson | int kvmppc_remove_spapr_tce(void *table, int fd, uint32_t window_size) |
864 | 0f5cb298 | David Gibson | { |
865 | 0f5cb298 | David Gibson | long len;
|
866 | 0f5cb298 | David Gibson | |
867 | 0f5cb298 | David Gibson | if (fd < 0) { |
868 | 0f5cb298 | David Gibson | return -1; |
869 | 0f5cb298 | David Gibson | } |
870 | 0f5cb298 | David Gibson | |
871 | 0f5cb298 | David Gibson | len = (window_size / SPAPR_VIO_TCE_PAGE_SIZE)*sizeof(VIOsPAPR_RTCE);
|
872 | 0f5cb298 | David Gibson | if ((munmap(table, len) < 0) || |
873 | 0f5cb298 | David Gibson | (close(fd) < 0)) {
|
874 | 0f5cb298 | David Gibson | fprintf(stderr, "KVM: Unexpected error removing KVM SPAPR TCE "
|
875 | 0f5cb298 | David Gibson | "table: %s", strerror(errno));
|
876 | 0f5cb298 | David Gibson | /* Leak the table */
|
877 | 0f5cb298 | David Gibson | } |
878 | 0f5cb298 | David Gibson | |
879 | 0f5cb298 | David Gibson | return 0; |
880 | 0f5cb298 | David Gibson | } |
881 | 0f5cb298 | David Gibson | |
/* Always ask the generic KVM code to stop the VM on emulation failures
 * (returning true here unconditionally). */
bool kvm_arch_stop_on_emulation_error(CPUState *env)
{
    return true;
}
886 | a1b87fe0 | Jan Kiszka | |
/* Per-vcpu SIGBUS (machine check) hook: no PPC-specific handling is
 * implemented; always returns 1.  NOTE(review): the generic layer
 * presumably treats non-zero as "not handled" -- confirm. */
int kvm_arch_on_sigbus_vcpu(CPUState *env, int code, void *addr)
{
    return 1;
}
891 | a1b87fe0 | Jan Kiszka | |
/* Process-wide SIGBUS hook: no PPC-specific handling is implemented;
 * always returns 1 (same convention as kvm_arch_on_sigbus_vcpu). */
int kvm_arch_on_sigbus(int code, void *addr)
{
    return 1;
}