target-arm/kvm32.c @ 1ed69e82

/*
 * ARM implementation of KVM hooks, 32 bit specific code.
 *
 * Copyright Christoffer Dall 2009-2010
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include <stdio.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "qemu/timer.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
#include "kvm_arm.h"
#include "cpu.h"
#include "hw/arm/arm.h"

static inline void set_feature(uint64_t *features, int feature)
{
    *features |= 1ULL << feature;
}

bool kvm_arm_get_host_cpu_features(ARMHostCPUClass *ahcc)
{
    /* Identify the feature bits corresponding to the host CPU, and
     * fill out the ARMHostCPUClass fields accordingly. To do this
     * we have to create a scratch VM, create a single CPU inside it,
     * and then query that CPU for the relevant ID registers.
     */
    int i, ret, fdarray[3];
    uint32_t midr, id_pfr0, id_isar0, mvfr1;
    uint64_t features = 0;
    /* Old kernels may not know about the PREFERRED_TARGET ioctl: however
     * we know they will only support creating one kind of guest CPU,
     * which is their preferred CPU type.
     */
    static const uint32_t cpus_to_try[] = {
        QEMU_KVM_ARM_TARGET_CORTEX_A15,
        QEMU_KVM_ARM_TARGET_NONE
    };
    struct kvm_vcpu_init init;
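    /* Each entry here asks KVM_GET_ONE_REG to copy one host ID register
     * into the matching local variable: MIDR, ID_PFR0 and ID_ISAR0 via
     * their cp15 encodings, and MVFR1 via the KVM VFP register group.
     */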
    struct kvm_one_reg idregs[] = {
        {
            .id = KVM_REG_ARM | KVM_REG_SIZE_U32
            | ENCODE_CP_REG(15, 0, 0, 0, 0, 0),
            .addr = (uintptr_t)&midr,
        },
        {
            .id = KVM_REG_ARM | KVM_REG_SIZE_U32
            | ENCODE_CP_REG(15, 0, 0, 1, 0, 0),
            .addr = (uintptr_t)&id_pfr0,
        },
        {
            .id = KVM_REG_ARM | KVM_REG_SIZE_U32
            | ENCODE_CP_REG(15, 0, 0, 2, 0, 0),
            .addr = (uintptr_t)&id_isar0,
        },
        {
            .id = KVM_REG_ARM | KVM_REG_SIZE_U32
            | KVM_REG_ARM_VFP | KVM_REG_ARM_VFP_MVFR1,
            .addr = (uintptr_t)&mvfr1,
        },
    };

    if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) {
        return false;
    }

    ahcc->target = init.target;

    /* This is not strictly blessed by the device tree binding docs yet,
     * but in practice the kernel does not care about this string so
     * there is no point maintaining a KVM_ARM_TARGET_* -> string table.
     */
    ahcc->dtb_compatible = "arm,arm-v7";

    for (i = 0; i < ARRAY_SIZE(idregs); i++) {
        ret = ioctl(fdarray[2], KVM_GET_ONE_REG, &idregs[i]);
        if (ret) {
            break;
        }
    }

    kvm_arm_destroy_scratch_host_vcpu(fdarray);

    if (ret) {
        return false;
    }

    /* Now we've retrieved all the register information, we can
     * set the feature bits based on the ID register fields.
     * We can assume any KVM-supporting CPU is at least a v7
     * with VFPv3, LPAE and the generic timers; this in turn implies
     * most of the other feature bits, but a few must be tested.
     */
    set_feature(&features, ARM_FEATURE_V7);
    set_feature(&features, ARM_FEATURE_VFP3);
    set_feature(&features, ARM_FEATURE_LPAE);
    set_feature(&features, ARM_FEATURE_GENERIC_TIMER);

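    /* ID_ISAR0 bits [27:24] are the Divide_instrs field: 1 means SDIV/UDIV
     * exist in the Thumb instruction set only, 2 means in both ARM and
     * Thumb.
     */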
    switch (extract32(id_isar0, 24, 4)) {
    case 1:
        set_feature(&features, ARM_FEATURE_THUMB_DIV);
        break;
    case 2:
        set_feature(&features, ARM_FEATURE_ARM_DIV);
        set_feature(&features, ARM_FEATURE_THUMB_DIV);
        break;
    default:
        break;
    }

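    /* ID_PFR0 bits [15:12] (the State3 field) describe ThumbEE support. */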
    if (extract32(id_pfr0, 12, 4) == 1) {
        set_feature(&features, ARM_FEATURE_THUMB2EE);
    }
    if (extract32(mvfr1, 20, 4) == 1) {
        set_feature(&features, ARM_FEATURE_VFP_FP16);
    }
    if (extract32(mvfr1, 12, 4) == 1) {
        set_feature(&features, ARM_FEATURE_NEON);
    }
    if (extract32(mvfr1, 28, 4) == 1) {
        /* FMAC support implies VFPv4 */
        set_feature(&features, ARM_FEATURE_VFP4);
    }

    ahcc->features = features;

    return true;
}

static bool reg_syncs_via_tuple_list(uint64_t regidx)
{
    /* Return true if the regidx is a register we should synchronize
     * via the cpreg_tuples array (ie is not a core reg we sync by
     * hand in kvm_arch_get/put_registers())
     */
    switch (regidx & KVM_REG_ARM_COPROC_MASK) {
    case KVM_REG_ARM_CORE:
    case KVM_REG_ARM_VFP:
        return false;
    default:
        return true;
    }
}

static int compare_u64(const void *a, const void *b)
{
    if (*(uint64_t *)a > *(uint64_t *)b) {
        return 1;
    }
    if (*(uint64_t *)a < *(uint64_t *)b) {
        return -1;
    }
    return 0;
}

int kvm_arch_init_vcpu(CPUState *cs)
{
    struct kvm_vcpu_init init;
    int i, ret, arraylen;
    uint64_t v;
    struct kvm_one_reg r;
    struct kvm_reg_list rl;
    struct kvm_reg_list *rlp;
    ARMCPU *cpu = ARM_CPU(cs);

    if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE) {
        fprintf(stderr, "KVM is not supported for this guest CPU type\n");
        return -EINVAL;
    }

    init.target = cpu->kvm_target;
    memset(init.features, 0, sizeof(init.features));
    if (cpu->start_powered_off) {
        init.features[0] = 1 << KVM_ARM_VCPU_POWER_OFF;
    }
    ret = kvm_vcpu_ioctl(cs, KVM_ARM_VCPU_INIT, &init);
    if (ret) {
        return ret;
    }
    /* Query the kernel to make sure it supports 32 VFP
     * registers: QEMU's "cortex-a15" CPU is always a
     * VFP-D32 core. The simplest way to do this is just
     * to attempt to read register d31.
     */
    r.id = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP | 31;
    r.addr = (uintptr_t)(&v);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
    if (ret == -ENOENT) {
        return -EINVAL;
    }

    /* Populate the cpreg list based on the kernel's idea
     * of what registers exist (and throw away the TCG-created list).
     */
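    /* If the supplied array is too small (here n == 0), KVM_GET_REG_LIST
     * fails with E2BIG and writes the required number of registers back
     * into rl.n, so this first call is just a probe for the list size.
     */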
    rl.n = 0;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_REG_LIST, &rl);
    if (ret != -E2BIG) {
        return ret;
    }
    rlp = g_malloc(sizeof(struct kvm_reg_list) + rl.n * sizeof(uint64_t));
    rlp->n = rl.n;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_REG_LIST, rlp);
    if (ret) {
        goto out;
    }
    /* Sort the list we get back from the kernel, since cpreg_tuples
     * must be in strictly ascending order.
     */
    qsort(&rlp->reg, rlp->n, sizeof(rlp->reg[0]), compare_u64);

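    /* Walk the list twice: first count how many registers will be
     * synchronized via the cpreg tuple arrays, then (once the arrays
     * have been resized) record their indexes.
     */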
    for (i = 0, arraylen = 0; i < rlp->n; i++) {
        if (!reg_syncs_via_tuple_list(rlp->reg[i])) {
            continue;
        }
        switch (rlp->reg[i] & KVM_REG_SIZE_MASK) {
        case KVM_REG_SIZE_U32:
        case KVM_REG_SIZE_U64:
            break;
        default:
            fprintf(stderr, "Can't handle size of register in kernel list\n");
            ret = -EINVAL;
            goto out;
        }

        arraylen++;
    }

    cpu->cpreg_indexes = g_renew(uint64_t, cpu->cpreg_indexes, arraylen);
    cpu->cpreg_values = g_renew(uint64_t, cpu->cpreg_values, arraylen);
    cpu->cpreg_vmstate_indexes = g_renew(uint64_t, cpu->cpreg_vmstate_indexes,
                                         arraylen);
    cpu->cpreg_vmstate_values = g_renew(uint64_t, cpu->cpreg_vmstate_values,
                                        arraylen);
    cpu->cpreg_array_len = arraylen;
    cpu->cpreg_vmstate_array_len = arraylen;

    for (i = 0, arraylen = 0; i < rlp->n; i++) {
        uint64_t regidx = rlp->reg[i];
        if (!reg_syncs_via_tuple_list(regidx)) {
            continue;
        }
        cpu->cpreg_indexes[arraylen] = regidx;
        arraylen++;
    }
    assert(cpu->cpreg_array_len == arraylen);

    if (!write_kvmstate_to_list(cpu)) {
        /* Shouldn't happen unless kernel is inconsistent about
         * what registers exist.
         */
        fprintf(stderr, "Initial read of kernel register state failed\n");
        ret = -EINVAL;
        goto out;
    }

    /* Save a copy of the initial register values so that we can
     * feed it back to the kernel on VCPU reset.
     */
    cpu->cpreg_reset_values = g_memdup(cpu->cpreg_values,
                                       cpu->cpreg_array_len *
                                       sizeof(cpu->cpreg_values[0]));

out:
    g_free(rlp);
    return ret;
}

typedef struct Reg {
    uint64_t id;
    int offset;
} Reg;
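/* A Reg entry pairs one KVM register ID with the offset of the
 * corresponding CPUARMState field, so the get/put code below can
 * transfer the whole table with a simple loop of
 * KVM_GET_ONE_REG/KVM_SET_ONE_REG calls.
 */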

#define COREREG(KERNELNAME, QEMUFIELD)                       \
    {                                                        \
        KVM_REG_ARM | KVM_REG_SIZE_U32 |                     \
        KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(KERNELNAME), \
        offsetof(CPUARMState, QEMUFIELD)                     \
    }

#define VFPSYSREG(R)                                       \
    {                                                      \
        KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP | \
        KVM_REG_ARM_VFP_##R,                               \
        offsetof(CPUARMState, vfp.xregs[ARM_VFP_##R])      \
    }

static const Reg regs[] = {
    /* R0_usr .. R14_usr */
    COREREG(usr_regs.uregs[0], regs[0]),
    COREREG(usr_regs.uregs[1], regs[1]),
    COREREG(usr_regs.uregs[2], regs[2]),
    COREREG(usr_regs.uregs[3], regs[3]),
    COREREG(usr_regs.uregs[4], regs[4]),
    COREREG(usr_regs.uregs[5], regs[5]),
    COREREG(usr_regs.uregs[6], regs[6]),
    COREREG(usr_regs.uregs[7], regs[7]),
    COREREG(usr_regs.uregs[8], usr_regs[0]),
    COREREG(usr_regs.uregs[9], usr_regs[1]),
    COREREG(usr_regs.uregs[10], usr_regs[2]),
    COREREG(usr_regs.uregs[11], usr_regs[3]),
    COREREG(usr_regs.uregs[12], usr_regs[4]),
    COREREG(usr_regs.uregs[13], banked_r13[0]),
    COREREG(usr_regs.uregs[14], banked_r14[0]),
    /* R13, R14, SPSR for SVC, ABT, UND, IRQ banks */
    COREREG(svc_regs[0], banked_r13[1]),
    COREREG(svc_regs[1], banked_r14[1]),
    COREREG(svc_regs[2], banked_spsr[1]),
    COREREG(abt_regs[0], banked_r13[2]),
    COREREG(abt_regs[1], banked_r14[2]),
    COREREG(abt_regs[2], banked_spsr[2]),
    COREREG(und_regs[0], banked_r13[3]),
    COREREG(und_regs[1], banked_r14[3]),
    COREREG(und_regs[2], banked_spsr[3]),
    COREREG(irq_regs[0], banked_r13[4]),
    COREREG(irq_regs[1], banked_r14[4]),
    COREREG(irq_regs[2], banked_spsr[4]),
    /* R8_fiq .. R14_fiq and SPSR_fiq */
    COREREG(fiq_regs[0], fiq_regs[0]),
    COREREG(fiq_regs[1], fiq_regs[1]),
    COREREG(fiq_regs[2], fiq_regs[2]),
    COREREG(fiq_regs[3], fiq_regs[3]),
    COREREG(fiq_regs[4], fiq_regs[4]),
    COREREG(fiq_regs[5], banked_r13[5]),
    COREREG(fiq_regs[6], banked_r14[5]),
    COREREG(fiq_regs[7], banked_spsr[5]),
    /* R15 */
    COREREG(usr_regs.uregs[15], regs[15]),
    /* VFP system registers */
    VFPSYSREG(FPSID),
    VFPSYSREG(MVFR1),
    VFPSYSREG(MVFR0),
    VFPSYSREG(FPEXC),
    VFPSYSREG(FPINST),
    VFPSYSREG(FPINST2),
};
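
/* CPSR, the d0-d31 data registers and FPSCR are not in this table: they
 * are not simple 32-bit CPUARMState fields and are handled as special
 * cases in kvm_arch_put_registers()/kvm_arch_get_registers() below.
 */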

int kvm_arch_put_registers(CPUState *cs, int level)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    struct kvm_one_reg r;
    int mode, bn;
    int ret, i;
    uint32_t cpsr, fpscr;

    /* Make sure the banked regs are properly set */
    mode = env->uncached_cpsr & CPSR_M;
    bn = bank_number(mode);
    if (mode == ARM_CPU_MODE_FIQ) {
        memcpy(env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
    } else {
        memcpy(env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
    }
    env->banked_r13[bn] = env->regs[13];
    env->banked_r14[bn] = env->regs[14];
    env->banked_spsr[bn] = env->spsr;

    /* Now we can safely copy stuff down to the kernel */
    for (i = 0; i < ARRAY_SIZE(regs); i++) {
        r.id = regs[i].id;
        r.addr = (uintptr_t)(env) + regs[i].offset;
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
        if (ret) {
            return ret;
        }
    }

    /* Special cases which aren't a single CPUARMState field */
    cpsr = cpsr_read(env);
    r.id = KVM_REG_ARM | KVM_REG_SIZE_U32 |
        KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(usr_regs.ARM_cpsr);
    r.addr = (uintptr_t)(&cpsr);
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
    if (ret) {
        return ret;
    }

    /* VFP registers */
    r.id = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP;
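    /* The kernel's IDs for d0..d31 are consecutive, so the loop below
     * simply increments r.id to step through them.
     */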
    for (i = 0; i < 32; i++) {
        r.addr = (uintptr_t)(&env->vfp.regs[i]);
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
        if (ret) {
            return ret;
        }
        r.id++;
    }

    r.id = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP |
        KVM_REG_ARM_VFP_FPSCR;
    fpscr = vfp_get_fpscr(env);
    r.addr = (uintptr_t)&fpscr;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
    if (ret) {
        return ret;
    }

    /* Note that we do not call write_cpustate_to_list()
     * here, so we are only writing the tuple list back to
     * KVM. This is safe because nothing can change the
     * CPUARMState cp15 fields (in particular gdb accesses cannot)
     * and so there are no changes to sync. In fact syncing would
     * be wrong at this point: for a constant register where TCG and
     * KVM disagree about its value, the preceding write_list_to_cpustate()
     * would not have had any effect on the CPUARMState value (since the
     * register is read-only), and a write_cpustate_to_list() here would
     * then try to write the TCG value back into KVM -- this would either
     * fail or incorrectly change the value the guest sees.
     *
     * If we ever want to allow the user to modify cp15 registers via
     * the gdb stub, we would need to be more clever here (for instance
     * tracking the set of registers kvm_arch_get_registers() successfully
     * managed to update the CPUARMState with, and only allowing those
     * to be written back up into the kernel).
     */
    if (!write_list_to_kvmstate(cpu)) {
        return -EINVAL;
    }

    return ret;
}

int kvm_arch_get_registers(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    struct kvm_one_reg r;
    int mode, bn;
    int ret, i;
    uint32_t cpsr, fpscr;

    for (i = 0; i < ARRAY_SIZE(regs); i++) {
        r.id = regs[i].id;
        r.addr = (uintptr_t)(env) + regs[i].offset;
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
        if (ret) {
            return ret;
        }
    }

    /* Special cases which aren't a single CPUARMState field */
    r.id = KVM_REG_ARM | KVM_REG_SIZE_U32 |
        KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(usr_regs.ARM_cpsr);
    r.addr = (uintptr_t)(&cpsr);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
    if (ret) {
        return ret;
    }
    cpsr_write(env, cpsr, 0xffffffff);

    /* Make sure the current mode regs are properly set */
    mode = env->uncached_cpsr & CPSR_M;
    bn = bank_number(mode);
    if (mode == ARM_CPU_MODE_FIQ) {
        memcpy(env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
    } else {
        memcpy(env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
    }
    env->regs[13] = env->banked_r13[bn];
    env->regs[14] = env->banked_r14[bn];
    env->spsr = env->banked_spsr[bn];

    /* VFP registers */
    r.id = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP;
    for (i = 0; i < 32; i++) {
        r.addr = (uintptr_t)(&env->vfp.regs[i]);
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
        if (ret) {
            return ret;
        }
        r.id++;
    }

    r.id = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP |
        KVM_REG_ARM_VFP_FPSCR;
    r.addr = (uintptr_t)&fpscr;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
    if (ret) {
        return ret;
    }
    vfp_set_fpscr(env, fpscr);

    if (!write_kvmstate_to_list(cpu)) {
        return -EINVAL;
    }
    /* Note that it's OK to have registers which aren't in CPUState,
     * so we can ignore a failure return here.
     */
    write_list_to_cpustate(cpu);

    return 0;
}

void kvm_arch_reset_vcpu(CPUState *cs)
{
    /* Feed the kernel back its initial register state */
    ARMCPU *cpu = ARM_CPU(cs);

    memmove(cpu->cpreg_values, cpu->cpreg_reset_values,
            cpu->cpreg_array_len * sizeof(cpu->cpreg_values[0]));

    if (!write_list_to_kvmstate(cpu)) {
        abort();
    }
}