root / target-arm / kvm.c @ a96c0514
History | View | Annotate | Download (24.5 kB)
1 |
/*
|
---|---|
2 |
* ARM implementation of KVM hooks
|
3 |
*
|
4 |
* Copyright Christoffer Dall 2009-2010
|
5 |
*
|
6 |
* This work is licensed under the terms of the GNU GPL, version 2 or later.
|
7 |
* See the COPYING file in the top-level directory.
|
8 |
*
|
9 |
*/
|
10 |
|
11 |
#include <stdio.h> |
12 |
#include <sys/types.h> |
13 |
#include <sys/ioctl.h> |
14 |
#include <sys/mman.h> |
15 |
|
16 |
#include <linux/kvm.h> |
17 |
|
18 |
#include "qemu-common.h" |
19 |
#include "qemu/timer.h" |
20 |
#include "sysemu/sysemu.h" |
21 |
#include "sysemu/kvm.h" |
22 |
#include "kvm_arm.h" |
23 |
#include "cpu.h" |
24 |
#include "hw/arm/arm.h" |
25 |
|
26 |
/* KVM capabilities that must be present on the host for ARM KVM support
 * to work at all. The list carries no entries beyond the
 * KVM_CAP_LAST_INFO terminator, i.e. no capabilities beyond base KVM
 * are required.
 */
const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_LAST_INFO
};
29 |
|
30 |
/* Create a short-lived "scratch" VM with a single VCPU, used to probe
 * the host CPU's properties (see kvm_arm_get_host_cpu_features()).
 *
 * @cpus_to_try: list of candidate CPU target values, terminated by
 *               QEMU_KVM_ARM_TARGET_NONE; only consulted on kernels too
 *               old to support KVM_ARM_PREFERRED_TARGET.
 * @fdarray: filled on success with {kvmfd, vmfd, cpufd}; the caller must
 *           release them with kvm_arm_destroy_scratch_host_vcpu().
 * @init: filled in with the kvm_vcpu_init that was accepted by the kernel.
 *
 * Returns true on success; on failure all fds opened so far are closed
 * and false is returned.
 */
bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try,
                                      int *fdarray,
                                      struct kvm_vcpu_init *init)
{
    int ret, kvmfd = -1, vmfd = -1, cpufd = -1;

    kvmfd = qemu_open("/dev/kvm", O_RDWR);
    if (kvmfd < 0) {
        goto err;
    }
    vmfd = ioctl(kvmfd, KVM_CREATE_VM, 0);
    if (vmfd < 0) {
        goto err;
    }
    cpufd = ioctl(vmfd, KVM_CREATE_VCPU, 0);
    if (cpufd < 0) {
        goto err;
    }

    /* Ask the kernel which CPU type it prefers for this host. */
    ret = ioctl(vmfd, KVM_ARM_PREFERRED_TARGET, init);
    if (ret >= 0) {
        ret = ioctl(cpufd, KVM_ARM_VCPU_INIT, init);
        if (ret < 0) {
            goto err;
        }
    } else {
        /* Old kernel which doesn't know about the
         * PREFERRED_TARGET ioctl: we know it will only support
         * creating one kind of guest CPU which is its preferred
         * CPU type. Try each candidate in turn until one sticks.
         * Note that if cpus_to_try is empty the PREFERRED_TARGET
         * failure code is still in ret, so we fall through to err.
         */
        while (*cpus_to_try != QEMU_KVM_ARM_TARGET_NONE) {
            init->target = *cpus_to_try++;
            memset(init->features, 0, sizeof(init->features));
            ret = ioctl(cpufd, KVM_ARM_VCPU_INIT, init);
            if (ret >= 0) {
                break;
            }
        }
        if (ret < 0) {
            goto err;
        }
    }

    fdarray[0] = kvmfd;
    fdarray[1] = vmfd;
    fdarray[2] = cpufd;

    return true;

err:
    /* Close in reverse creation order: vcpu, then VM, then /dev/kvm. */
    if (cpufd >= 0) {
        close(cpufd);
    }
    if (vmfd >= 0) {
        close(vmfd);
    }
    if (kvmfd >= 0) {
        close(kvmfd);
    }

    return false;
}
93 |
|
94 |
/* Tear down the scratch VM created by kvm_arm_create_scratch_host_vcpu().
 * fdarray holds {kvmfd, vmfd, cpufd}; close in reverse creation order so
 * the vcpu fd goes before its VM and the VM before /dev/kvm itself.
 */
void kvm_arm_destroy_scratch_host_vcpu(int *fdarray)
{
    close(fdarray[2]); /* cpufd */
    close(fdarray[1]); /* vmfd */
    close(fdarray[0]); /* kvmfd */
}
102 |
|
103 |
/* OR the given ARM_FEATURE_* bit into the feature word. */
static inline void set_feature(uint64_t *features, int feature)
{
    const uint64_t bit = 1ULL << feature;

    *features = *features | bit;
}
107 |
|
108 |
/* Probe the host CPU via a scratch VM and fill in @ahcc (target,
 * dtb_compatible, features) accordingly. Returns false if the scratch
 * VM could not be created or a register read failed.
 */
bool kvm_arm_get_host_cpu_features(ARMHostCPUClass *ahcc)
{
    /* Identify the feature bits corresponding to the host CPU, and
     * fill out the ARMHostCPUClass fields accordingly. To do this
     * we have to create a scratch VM, create a single CPU inside it,
     * and then query that CPU for the relevant ID registers.
     */
    int i, ret, fdarray[3];
    uint32_t midr, id_pfr0, id_isar0, mvfr1;
    uint64_t features = 0;
    /* Old kernels may not know about the PREFERRED_TARGET ioctl: however
     * we know these will only support creating one kind of guest CPU,
     * which is its preferred CPU type.
     */
    static const uint32_t cpus_to_try[] = {
        QEMU_KVM_ARM_TARGET_CORTEX_A15,
        QEMU_KVM_ARM_TARGET_NONE
    };
    struct kvm_vcpu_init init;
    /* cp15 ID registers to read from the scratch vcpu; each entry's
     * addr points at the local that receives the value.
     * NOTE(review): midr is fetched but not consulted below — presumably
     * kept for future use; confirm before removing.
     */
    struct kvm_one_reg idregs[] = {
        {
            .id = KVM_REG_ARM | KVM_REG_SIZE_U32
            | ENCODE_CP_REG(15, 0, 0, 0, 0, 0),
            .addr = (uintptr_t)&midr,
        },
        {
            .id = KVM_REG_ARM | KVM_REG_SIZE_U32
            | ENCODE_CP_REG(15, 0, 0, 1, 0, 0),
            .addr = (uintptr_t)&id_pfr0,
        },
        {
            .id = KVM_REG_ARM | KVM_REG_SIZE_U32
            | ENCODE_CP_REG(15, 0, 0, 2, 0, 0),
            .addr = (uintptr_t)&id_isar0,
        },
        {
            .id = KVM_REG_ARM | KVM_REG_SIZE_U32
            | KVM_REG_ARM_VFP | KVM_REG_ARM_VFP_MVFR1,
            .addr = (uintptr_t)&mvfr1,
        },
    };

    if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) {
        return false;
    }

    ahcc->target = init.target;

    /* This is not strictly blessed by the device tree binding docs yet,
     * but in practice the kernel does not care about this string so
     * there is no point maintaining an KVM_ARM_TARGET_* -> string table.
     */
    ahcc->dtb_compatible = "arm,arm-v7";

    /* Read each ID register from the scratch vcpu (fdarray[2]). */
    for (i = 0; i < ARRAY_SIZE(idregs); i++) {
        ret = ioctl(fdarray[2], KVM_GET_ONE_REG, &idregs[i]);
        if (ret) {
            break;
        }
    }

    kvm_arm_destroy_scratch_host_vcpu(fdarray);

    if (ret) {
        return false;
    }

    /* Now we've retrieved all the register information we can
     * set the feature bits based on the ID register fields.
     * We can assume any KVM supporting CPU is at least a v7
     * with VFPv3, LPAE and the generic timers; this in turn implies
     * most of the other feature bits, but a few must be tested.
     */
    set_feature(&features, ARM_FEATURE_V7);
    set_feature(&features, ARM_FEATURE_VFP3);
    set_feature(&features, ARM_FEATURE_LPAE);
    set_feature(&features, ARM_FEATURE_GENERIC_TIMER);

    /* ID_ISAR0[27:24]: divide instruction support. */
    switch (extract32(id_isar0, 24, 4)) {
    case 1:
        set_feature(&features, ARM_FEATURE_THUMB_DIV);
        break;
    case 2:
        set_feature(&features, ARM_FEATURE_ARM_DIV);
        set_feature(&features, ARM_FEATURE_THUMB_DIV);
        break;
    default:
        break;
    }

    /* ID_PFR0[15:12]: ThumbEE support. */
    if (extract32(id_pfr0, 12, 4) == 1) {
        set_feature(&features, ARM_FEATURE_THUMB2EE);
    }
    /* MVFR1[23:20]: VFP half-precision support. */
    if (extract32(mvfr1, 20, 4) == 1) {
        set_feature(&features, ARM_FEATURE_VFP_FP16);
    }
    /* MVFR1[15:12]: Neon support. */
    if (extract32(mvfr1, 12, 4) == 1) {
        set_feature(&features, ARM_FEATURE_NEON);
    }
    /* MVFR1[31:28]: fused multiply-accumulate. */
    if (extract32(mvfr1, 28, 4) == 1) {
        /* FMAC support implies VFPv4 */
        set_feature(&features, ARM_FEATURE_VFP4);
    }

    ahcc->features = features;

    return true;
}
216 |
|
217 |
/* Class init for the 'host' CPU type: fill in the feature bits by
 * probing the host. All we really need to set up is the feature word --
 * we rely on the fact that the various ID register values in ARMCPU are
 * only used for TCG CPUs. Probing failure is fatal.
 */
static void kvm_arm_host_cpu_class_init(ObjectClass *oc, void *data)
{
    ARMHostCPUClass *ahcc = ARM_HOST_CPU_CLASS(oc);

    if (kvm_arm_get_host_cpu_features(ahcc)) {
        return;
    }

    fprintf(stderr, "Failed to retrieve host CPU features!\n");
    abort();
}
231 |
|
232 |
/* Instance init for the 'host' CPU type: copy the probed target,
 * dtb-compatible string and feature bits from the class into this
 * CPU instance.
 */
static void kvm_arm_host_cpu_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);
    ARMHostCPUClass *hc = ARM_HOST_CPU_GET_CLASS(obj);
    CPUARMState *env = &cpu->env;

    cpu->dtb_compatible = hc->dtb_compatible;
    cpu->kvm_target = hc->target;
    env->features = hc->features;
}
242 |
|
243 |
/* QOM type for the 'host' CPU: an ARM CPU whose features mirror the
 * host's, as probed in kvm_arm_host_cpu_class_init().
 */
static const TypeInfo host_arm_cpu_type_info = {
    .name = TYPE_ARM_HOST_CPU,
    .parent = TYPE_ARM_CPU,
    .instance_init = kvm_arm_host_cpu_initfn,
    .class_init = kvm_arm_host_cpu_class_init,
    .class_size = sizeof(ARMHostCPUClass),
};
250 |
|
251 |
/* Arch-specific KVM initialization: register the 'host' CPU type and
 * set global KVM behavior flags. Returns 0 (cannot fail).
 */
int kvm_arch_init(KVMState *s)
{
    /* For ARM interrupt delivery is always asynchronous,
     * whether we are using an in-kernel VGIC or not.
     */
    kvm_async_interrupts_allowed = true;

    type_register_static(&host_arm_cpu_type_info);

    return 0;
}
262 |
|
263 |
/* The KVM vcpu id for a CPU is simply its QEMU cpu_index. */
unsigned long kvm_arch_vcpu_id(CPUState *cpu)
{
    return cpu->cpu_index;
}
267 |
|
268 |
static bool reg_syncs_via_tuple_list(uint64_t regidx) |
269 |
{ |
270 |
/* Return true if the regidx is a register we should synchronize
|
271 |
* via the cpreg_tuples array (ie is not a core reg we sync by
|
272 |
* hand in kvm_arch_get/put_registers())
|
273 |
*/
|
274 |
switch (regidx & KVM_REG_ARM_COPROC_MASK) {
|
275 |
case KVM_REG_ARM_CORE:
|
276 |
case KVM_REG_ARM_VFP:
|
277 |
return false; |
278 |
default:
|
279 |
return true; |
280 |
} |
281 |
} |
282 |
|
283 |
/* qsort() comparator for uint64_t values: -1/0/1 for less/equal/greater. */
static int compare_u64(const void *a, const void *b)
{
    const uint64_t x = *(const uint64_t *)a;
    const uint64_t y = *(const uint64_t *)b;

    /* Branchless three-way compare: (x > y) - (x < y). */
    return (x > y) - (x < y);
}
293 |
|
294 |
/* Per-vcpu KVM initialization: issue KVM_ARM_VCPU_INIT, sanity-check the
 * VFP register file, then build the cpreg index/value arrays from the
 * kernel's KVM_GET_REG_LIST. Returns 0 on success or a negative errno.
 */
int kvm_arch_init_vcpu(CPUState *cs)
{
    struct kvm_vcpu_init init;
    int i, ret, arraylen;
    uint64_t v;
    struct kvm_one_reg r;
    struct kvm_reg_list rl;
    struct kvm_reg_list *rlp;
    ARMCPU *cpu = ARM_CPU(cs);

    if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE) {
        fprintf(stderr, "KVM is not supported for this guest CPU type\n");
        return -EINVAL;
    }

    init.target = cpu->kvm_target;
    memset(init.features, 0, sizeof(init.features));
    if (cpu->start_powered_off) {
        init.features[0] = 1 << KVM_ARM_VCPU_POWER_OFF;
    }
    ret = kvm_vcpu_ioctl(cs, KVM_ARM_VCPU_INIT, &init);
    if (ret) {
        return ret;
    }
    /* Query the kernel to make sure it supports 32 VFP
     * registers: QEMU's "cortex-a15" CPU is always a
     * VFP-D32 core. The simplest way to do this is just
     * to attempt to read register d31.
     */
    r.id = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP | 31;
    r.addr = (uintptr_t)(&v);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
    if (ret == -ENOENT) {
        return -EINVAL;
    }

    /* Populate the cpreg list based on the kernel's idea
     * of what registers exist (and throw away the TCG-created list).
     * First call with n == 0 to learn the required size: the kernel
     * replies -E2BIG and fills in rl.n.
     */
    rl.n = 0;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_REG_LIST, &rl);
    if (ret != -E2BIG) {
        return ret;
    }
    rlp = g_malloc(sizeof(struct kvm_reg_list) + rl.n * sizeof(uint64_t));
    rlp->n = rl.n;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_REG_LIST, rlp);
    if (ret) {
        goto out;
    }
    /* Sort the list we get back from the kernel, since cpreg_tuples
     * must be in strictly ascending order.
     */
    qsort(&rlp->reg, rlp->n, sizeof(rlp->reg[0]), compare_u64);

    /* First pass: count the registers we sync via the tuple list,
     * rejecting any of a size we can't handle.
     */
    for (i = 0, arraylen = 0; i < rlp->n; i++) {
        if (!reg_syncs_via_tuple_list(rlp->reg[i])) {
            continue;
        }
        switch (rlp->reg[i] & KVM_REG_SIZE_MASK) {
        case KVM_REG_SIZE_U32:
        case KVM_REG_SIZE_U64:
            break;
        default:
            fprintf(stderr, "Can't handle size of register in kernel list\n");
            ret = -EINVAL;
            goto out;
        }

        arraylen++;
    }

    /* (Re)size the cpreg arrays, discarding whatever TCG had set up. */
    cpu->cpreg_indexes = g_renew(uint64_t, cpu->cpreg_indexes, arraylen);
    cpu->cpreg_values = g_renew(uint64_t, cpu->cpreg_values, arraylen);
    cpu->cpreg_vmstate_indexes = g_renew(uint64_t, cpu->cpreg_vmstate_indexes,
                                         arraylen);
    cpu->cpreg_vmstate_values = g_renew(uint64_t, cpu->cpreg_vmstate_values,
                                        arraylen);
    cpu->cpreg_array_len = arraylen;
    cpu->cpreg_vmstate_array_len = arraylen;

    /* Second pass: fill in the (already sorted) register indexes. */
    for (i = 0, arraylen = 0; i < rlp->n; i++) {
        uint64_t regidx = rlp->reg[i];
        if (!reg_syncs_via_tuple_list(regidx)) {
            continue;
        }
        cpu->cpreg_indexes[arraylen] = regidx;
        arraylen++;
    }
    assert(cpu->cpreg_array_len == arraylen);

    if (!write_kvmstate_to_list(cpu)) {
        /* Shouldn't happen unless kernel is inconsistent about
         * what registers exist.
         */
        fprintf(stderr, "Initial read of kernel register state failed\n");
        ret = -EINVAL;
        goto out;
    }

    /* Save a copy of the initial register values so that we can
     * feed it back to the kernel on VCPU reset.
     */
    cpu->cpreg_reset_values = g_memdup(cpu->cpreg_values,
                                       cpu->cpreg_array_len *
                                       sizeof(cpu->cpreg_values[0]));

out:
    g_free(rlp);
    return ret;
}
405 |
|
406 |
/* We track all the KVM devices which need their memory addresses
 * passing to the kernel in a list of these structures.
 * When board init is complete we run through the list and
 * tell the kernel the base addresses of the memory regions.
 * We use a MemoryListener to track mapping and unmapping of
 * the regions during board creation, so the board models don't
 * need to do anything special for the KVM case.
 */
typedef struct KVMDevice {
    struct kvm_arm_device_addr kda; /* id + addr passed to the kernel */
    MemoryRegion *mr;               /* region we watch for mapping */
    QSLIST_ENTRY(KVMDevice) entries;
} KVMDevice;

/* Singly-linked list of all registered KVM devices awaiting an address. */
static QSLIST_HEAD(kvm_devices_head, KVMDevice) kvm_devices_head;
|
421 |
|
422 |
static void kvm_arm_devlistener_add(MemoryListener *listener, |
423 |
MemoryRegionSection *section) |
424 |
{ |
425 |
KVMDevice *kd; |
426 |
|
427 |
QSLIST_FOREACH(kd, &kvm_devices_head, entries) { |
428 |
if (section->mr == kd->mr) {
|
429 |
kd->kda.addr = section->offset_within_address_space; |
430 |
} |
431 |
} |
432 |
} |
433 |
|
434 |
static void kvm_arm_devlistener_del(MemoryListener *listener, |
435 |
MemoryRegionSection *section) |
436 |
{ |
437 |
KVMDevice *kd; |
438 |
|
439 |
QSLIST_FOREACH(kd, &kvm_devices_head, entries) { |
440 |
if (section->mr == kd->mr) {
|
441 |
kd->kda.addr = -1;
|
442 |
} |
443 |
} |
444 |
} |
445 |
|
446 |
/* Listener tracking map/unmap of KVM device regions during board init. */
static MemoryListener devlistener = {
    .region_add = kvm_arm_devlistener_add,
    .region_del = kvm_arm_devlistener_del,
};
450 |
|
451 |
/* Machine-init-done notifier: board creation is complete, so pass each
 * tracked device's final address to the kernel via
 * KVM_ARM_SET_DEVICE_ADDR, then free the tracking list. A kernel
 * rejection here is fatal (abort). Devices whose region was never
 * mapped (addr == -1) are skipped.
 */
static void kvm_arm_machine_init_done(Notifier *notifier, void *data)
{
    KVMDevice *kd, *tkd;

    /* No more regions will be mapped; stop listening. */
    memory_listener_unregister(&devlistener);
    QSLIST_FOREACH_SAFE(kd, &kvm_devices_head, entries, tkd) {
        if (kd->kda.addr != -1) {
            if (kvm_vm_ioctl(kvm_state, KVM_ARM_SET_DEVICE_ADDR,
                             &kd->kda) < 0) {
                fprintf(stderr, "KVM_ARM_SET_DEVICE_ADDRESS failed: %s\n",
                        strerror(errno));
                abort();
            }
        }
        /* Drop the reference taken in kvm_arm_register_device(). */
        memory_region_unref(kd->mr);
        g_free(kd);
    }
}
469 |
|
470 |
/* Notifier hooked into machine init completion (registered lazily in
 * kvm_arm_register_device()).
 */
static Notifier notify = {
    .notify = kvm_arm_machine_init_done,
};
473 |
|
474 |
/* Register a memory region as belonging to an in-kernel KVM device with
 * the given device id; once board init completes, the region's mapped
 * address is passed to the kernel. No-op unless the in-kernel irqchip
 * is in use. Takes a reference on @mr for the lifetime of the tracking
 * entry.
 */
void kvm_arm_register_device(MemoryRegion *mr, uint64_t devid)
{
    KVMDevice *dev;

    if (!kvm_irqchip_in_kernel()) {
        return;
    }

    /* Lazily install the listener and init-done hook on first use. */
    if (QSLIST_EMPTY(&kvm_devices_head)) {
        memory_listener_register(&devlistener, NULL);
        qemu_add_machine_init_done_notifier(&notify);
    }

    dev = g_new0(KVMDevice, 1);
    dev->mr = mr;
    dev->kda.addr = -1; /* "not mapped yet" */
    dev->kda.id = devid;
    memory_region_ref(dev->mr);
    QSLIST_INSERT_HEAD(&kvm_devices_head, dev, entries);
}
493 |
|
494 |
/* Read every register in cpu->cpreg_indexes from the kernel into
 * cpu->cpreg_values. Returns false if any individual read failed
 * (remaining registers are still attempted).
 */
bool write_kvmstate_to_list(ARMCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        struct kvm_one_reg r;
        uint64_t regidx = cpu->cpreg_indexes[i];
        uint32_t v32;
        int ret;

        r.id = regidx;

        switch (regidx & KVM_REG_SIZE_MASK) {
        case KVM_REG_SIZE_U32:
            /* Read into a u32 temporary, then widen into the u64 slot. */
            r.addr = (uintptr_t)&v32;
            ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
            if (!ret) {
                cpu->cpreg_values[i] = v32;
            }
            break;
        case KVM_REG_SIZE_U64:
            /* u64 registers can be read directly into the array slot. */
            r.addr = (uintptr_t)(cpu->cpreg_values + i);
            ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
            break;
        default:
            /* List construction in kvm_arch_init_vcpu() only admits
             * u32/u64 registers, so this is unreachable.
             */
            abort();
        }
        if (ret) {
            ok = false;
        }
    }
    return ok;
}
529 |
|
530 |
/* Write every register value in cpu->cpreg_values back to the kernel.
 * Returns false if any individual write failed (remaining registers
 * are still attempted).
 */
bool write_list_to_kvmstate(ARMCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        struct kvm_one_reg r;
        uint64_t regidx = cpu->cpreg_indexes[i];
        uint32_t v32;
        int ret;

        r.id = regidx;
        switch (regidx & KVM_REG_SIZE_MASK) {
        case KVM_REG_SIZE_U32:
            /* Narrow the stored u64 into a u32 temporary for the write. */
            v32 = cpu->cpreg_values[i];
            r.addr = (uintptr_t)&v32;
            break;
        case KVM_REG_SIZE_U64:
            r.addr = (uintptr_t)(cpu->cpreg_values + i);
            break;
        default:
            /* Only u32/u64 registers are admitted to the list. */
            abort();
        }
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
        if (ret) {
            /* We might fail for "unknown register" and also for
             * "you tried to set a register which is constant with
             * a different value from what it actually contains".
             */
            ok = false;
        }
    }
    return ok;
}
565 |
|
566 |
/* Mapping from a KVM "core"/VFP register id to the offset of the
 * corresponding field in CPUARMState, used by the by-hand sync loops in
 * kvm_arch_get/put_registers().
 */
typedef struct Reg {
    uint64_t id;  /* KVM_REG_* encoded register id */
    int offset;   /* offsetof the field within CPUARMState */
} Reg;

/* Table entry for a 32-bit core register: KERNELNAME is the field name
 * within the kernel's struct kvm_regs, QEMUFIELD the CPUARMState field.
 */
#define COREREG(KERNELNAME, QEMUFIELD)                \
    {                                                 \
        KVM_REG_ARM | KVM_REG_SIZE_U32 |              \
        KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(KERNELNAME), \
        offsetof(CPUARMState, QEMUFIELD)              \
    }

/* Table entry for a 32-bit VFP system register R (FPSID, FPEXC, ...). */
#define VFPSYSREG(R)                                  \
    {                                                 \
        KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP | \
        KVM_REG_ARM_VFP_##R,                          \
        offsetof(CPUARMState, vfp.xregs[ARM_VFP_##R]) \
    }

static const Reg regs[] = {
    /* R0_usr .. R14_usr */
    COREREG(usr_regs.uregs[0], regs[0]),
    COREREG(usr_regs.uregs[1], regs[1]),
    COREREG(usr_regs.uregs[2], regs[2]),
    COREREG(usr_regs.uregs[3], regs[3]),
    COREREG(usr_regs.uregs[4], regs[4]),
    COREREG(usr_regs.uregs[5], regs[5]),
    COREREG(usr_regs.uregs[6], regs[6]),
    COREREG(usr_regs.uregs[7], regs[7]),
    /* r8-r12 live in the usr_regs[] bank in CPUARMState */
    COREREG(usr_regs.uregs[8], usr_regs[0]),
    COREREG(usr_regs.uregs[9], usr_regs[1]),
    COREREG(usr_regs.uregs[10], usr_regs[2]),
    COREREG(usr_regs.uregs[11], usr_regs[3]),
    COREREG(usr_regs.uregs[12], usr_regs[4]),
    COREREG(usr_regs.uregs[13], banked_r13[0]),
    COREREG(usr_regs.uregs[14], banked_r14[0]),
    /* R13, R14, SPSR for SVC, ABT, UND, IRQ banks */
    COREREG(svc_regs[0], banked_r13[1]),
    COREREG(svc_regs[1], banked_r14[1]),
    COREREG(svc_regs[2], banked_spsr[1]),
    COREREG(abt_regs[0], banked_r13[2]),
    COREREG(abt_regs[1], banked_r14[2]),
    COREREG(abt_regs[2], banked_spsr[2]),
    COREREG(und_regs[0], banked_r13[3]),
    COREREG(und_regs[1], banked_r14[3]),
    COREREG(und_regs[2], banked_spsr[3]),
    COREREG(irq_regs[0], banked_r13[4]),
    COREREG(irq_regs[1], banked_r14[4]),
    COREREG(irq_regs[2], banked_spsr[4]),
    /* R8_fiq .. R14_fiq and SPSR_fiq */
    COREREG(fiq_regs[0], fiq_regs[0]),
    COREREG(fiq_regs[1], fiq_regs[1]),
    COREREG(fiq_regs[2], fiq_regs[2]),
    COREREG(fiq_regs[3], fiq_regs[3]),
    COREREG(fiq_regs[4], fiq_regs[4]),
    COREREG(fiq_regs[5], banked_r13[5]),
    COREREG(fiq_regs[6], banked_r14[5]),
    COREREG(fiq_regs[7], banked_spsr[5]),
    /* R15 */
    COREREG(usr_regs.uregs[15], regs[15]),
    /* VFP system registers */
    VFPSYSREG(FPSID),
    VFPSYSREG(MVFR1),
    VFPSYSREG(MVFR0),
    VFPSYSREG(FPEXC),
    VFPSYSREG(FPINST),
    VFPSYSREG(FPINST2),
};
634 |
|
635 |
/* Copy QEMU's CPU state into the kernel: banked core registers, CPSR,
 * the VFP register file and FPSCR, then the cpreg tuple list.
 * Returns 0 on success or a negative errno.
 *
 * Fix: the write_list_to_kvmstate() failure path previously returned
 * positive EINVAL; every other error path here (and QEMU/KVM convention
 * generally) returns a negative errno, so return -EINVAL.
 */
int kvm_arch_put_registers(CPUState *cs, int level)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    struct kvm_one_reg r;
    int mode, bn;
    int ret, i;
    uint32_t cpsr, fpscr;

    /* Make sure the banked regs are properly set: fold the live
     * r8-r14/spsr back into the bank for the current CPU mode.
     */
    mode = env->uncached_cpsr & CPSR_M;
    bn = bank_number(mode);
    if (mode == ARM_CPU_MODE_FIQ) {
        memcpy(env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
    } else {
        memcpy(env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
    }
    env->banked_r13[bn] = env->regs[13];
    env->banked_r14[bn] = env->regs[14];
    env->banked_spsr[bn] = env->spsr;

    /* Now we can safely copy stuff down to the kernel */
    for (i = 0; i < ARRAY_SIZE(regs); i++) {
        r.id = regs[i].id;
        r.addr = (uintptr_t)(env) + regs[i].offset;
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
        if (ret) {
            return ret;
        }
    }

    /* Special cases which aren't a single CPUARMState field */
    cpsr = cpsr_read(env);
    r.id = KVM_REG_ARM | KVM_REG_SIZE_U32 |
        KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(usr_regs.ARM_cpsr);
    r.addr = (uintptr_t)(&cpsr);
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
    if (ret) {
        return ret;
    }

    /* VFP registers d0..d31; consecutive ids, so just bump r.id. */
    r.id = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP;
    for (i = 0; i < 32; i++) {
        r.addr = (uintptr_t)(&env->vfp.regs[i]);
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
        if (ret) {
            return ret;
        }
        r.id++;
    }

    r.id = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP |
        KVM_REG_ARM_VFP_FPSCR;
    fpscr = vfp_get_fpscr(env);
    r.addr = (uintptr_t)&fpscr;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
    if (ret) {
        return ret;
    }

    /* Note that we do not call write_cpustate_to_list()
     * here, so we are only writing the tuple list back to
     * KVM. This is safe because nothing can change the
     * CPUARMState cp15 fields (in particular gdb accesses cannot)
     * and so there are no changes to sync. In fact syncing would
     * be wrong at this point: for a constant register where TCG and
     * KVM disagree about its value, the preceding write_list_to_cpustate()
     * would not have had any effect on the CPUARMState value (since the
     * register is read-only), and a write_cpustate_to_list() here would
     * then try to write the TCG value back into KVM -- this would either
     * fail or incorrectly change the value the guest sees.
     *
     * If we ever want to allow the user to modify cp15 registers via
     * the gdb stub, we would need to be more clever here (for instance
     * tracking the set of registers kvm_arch_get_registers() successfully
     * managed to update the CPUARMState with, and only allowing those
     * to be written back up into the kernel).
     */
    if (!write_list_to_kvmstate(cpu)) {
        return -EINVAL;
    }

    return ret;
}
720 |
|
721 |
/* Copy the kernel's CPU state into QEMU: core registers, CPSR (with the
 * banked registers re-derived for the current mode), the VFP register
 * file and FPSCR, then the cpreg tuple list.
 * Returns 0 on success or a negative errno.
 *
 * Fix: the write_kvmstate_to_list() failure path previously returned
 * positive EINVAL; every other error path here (and QEMU/KVM convention
 * generally) returns a negative errno, so return -EINVAL.
 */
int kvm_arch_get_registers(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    struct kvm_one_reg r;
    int mode, bn;
    int ret, i;
    uint32_t cpsr, fpscr;

    for (i = 0; i < ARRAY_SIZE(regs); i++) {
        r.id = regs[i].id;
        r.addr = (uintptr_t)(env) + regs[i].offset;
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
        if (ret) {
            return ret;
        }
    }

    /* Special cases which aren't a single CPUARMState field */
    r.id = KVM_REG_ARM | KVM_REG_SIZE_U32 |
        KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(usr_regs.ARM_cpsr);
    r.addr = (uintptr_t)(&cpsr);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
    if (ret) {
        return ret;
    }
    cpsr_write(env, cpsr, 0xffffffff);

    /* Make sure the current mode regs are properly set: pull the live
     * r8-r14/spsr out of the bank selected by the new CPSR mode.
     */
    mode = env->uncached_cpsr & CPSR_M;
    bn = bank_number(mode);
    if (mode == ARM_CPU_MODE_FIQ) {
        memcpy(env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
    } else {
        memcpy(env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
    }
    env->regs[13] = env->banked_r13[bn];
    env->regs[14] = env->banked_r14[bn];
    env->spsr = env->banked_spsr[bn];

    /* VFP registers d0..d31; consecutive ids, so just bump r.id. */
    r.id = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP;
    for (i = 0; i < 32; i++) {
        r.addr = (uintptr_t)(&env->vfp.regs[i]);
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
        if (ret) {
            return ret;
        }
        r.id++;
    }

    r.id = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP |
        KVM_REG_ARM_VFP_FPSCR;
    r.addr = (uintptr_t)&fpscr;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
    if (ret) {
        return ret;
    }
    vfp_set_fpscr(env, fpscr);

    if (!write_kvmstate_to_list(cpu)) {
        return -EINVAL;
    }
    /* Note that it's OK to have registers which aren't in CPUState,
     * so we can ignore a failure return here.
     */
    write_list_to_cpustate(cpu);

    return 0;
}
791 |
|
792 |
/* No per-run fixup is needed before entering the guest on ARM. */
void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
{
}

/* No per-run fixup is needed after returning from the guest on ARM. */
void kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
{
}

/* No ARM-specific KVM exit reasons are handled; returning 0 hands the
 * exit back to the generic code.
 */
int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
    return 0;
}
|
805 |
/* Reset the VCPU by feeding the kernel back the register snapshot taken
 * at init time (cpu->cpreg_reset_values). A failed write-back means our
 * register list no longer matches the kernel's: unrecoverable, so abort.
 */
void kvm_arch_reset_vcpu(CPUState *cs)
{
    /* Feed the kernel back its initial register state */
    ARMCPU *cpu = ARM_CPU(cs);

    /* cpreg_reset_values is a g_memdup() of cpreg_values made in
     * kvm_arch_init_vcpu(), so the two buffers are distinct allocations
     * and cannot overlap: plain memcpy suffices (was memmove).
     */
    memcpy(cpu->cpreg_values, cpu->cpreg_reset_values,
           cpu->cpreg_array_len * sizeof(cpu->cpreg_values[0]));

    if (!write_list_to_kvmstate(cpu)) {
        abort();
    }
}
817 |
|
818 |
/* Always stop the guest if the kernel reports an emulation error. */
bool kvm_arch_stop_on_emulation_error(CPUState *cs)
{
    return true;
}

/* No ARM-specific async events; 0 means "keep running". */
int kvm_arch_process_async_events(CPUState *cs)
{
    return 0;
}
827 |
|
828 |
/* SIGBUS (e.g. hardware memory error) handling is not implemented for
 * ARM; returning nonzero tells the generic code we did not handle it.
 */
int kvm_arch_on_sigbus_vcpu(CPUState *cs, int code, void *addr)
{
    return 1;
}

/* As above, for SIGBUS delivered outside vcpu context. */
int kvm_arch_on_sigbus(int code, void *addr)
{
    return 1;
}
837 |
|
838 |
/* Guest debugging (breakpoints/watchpoints via KVM) is not implemented
 * for ARM: each hook below just logs the unimplemented call and, where
 * a result is expected, fails with -EINVAL.
 */
void kvm_arch_update_guest_debug(CPUState *cs, struct kvm_guest_debug *dbg)
{
    qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
}

int kvm_arch_insert_sw_breakpoint(CPUState *cs,
                                  struct kvm_sw_breakpoint *bp)
{
    qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
    return -EINVAL;
}

int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
    return -EINVAL;
}

int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
    return -EINVAL;
}

int kvm_arch_remove_sw_breakpoint(CPUState *cs,
                                  struct kvm_sw_breakpoint *bp)
{
    qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
    return -EINVAL;
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
}
875 |
|
876 |
/* No arch-specific irq routing setup is required on ARM. */
void kvm_arch_init_irq_routing(KVMState *s)
{
}