/* target-i386/kvm.c @ 7cc2cc3e */

/*
 * QEMU KVM support
 *
 * Copyright (C) 2006-2008 Qumranet Technologies
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/utsname.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "sysemu.h"
#include "kvm.h"
#include "cpu.h"
#include "gdbstub.h"
#include "host-utils.h"
#include "hw/pc.h"
#include "hw/apic.h"
#include "ioport.h"
#include "kvm_x86.h"

#ifdef CONFIG_KVM_PARA
#include <linux/kvm_para.h>
#endif

//#define DEBUG_KVM

#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#define MSR_KVM_WALL_CLOCK  0x11
#define MSR_KVM_SYSTEM_TIME 0x12

#ifndef BUS_MCEERR_AR
#define BUS_MCEERR_AR 4
#endif
#ifndef BUS_MCEERR_AO
#define BUS_MCEERR_AO 5
#endif

static int lm_capable_kernel;

#ifdef KVM_CAP_EXT_CPUID

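/*
 * Ask the kernel for its supported CPUID table.  KVM_GET_SUPPORTED_CPUID
 * fails with E2BIG if the buffer is too small, so the caller below
 * retries with a doubled entry count until the whole table fits.
 */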
static struct kvm_cpuid2 *try_get_cpuid(KVMState *s, int max)
{
    struct kvm_cpuid2 *cpuid;
    int r, size;

    size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
    cpuid = (struct kvm_cpuid2 *)qemu_mallocz(size);
    cpuid->nent = max;
    r = kvm_ioctl(s, KVM_GET_SUPPORTED_CPUID, cpuid);
    if (r == 0 && cpuid->nent >= max) {
        r = -E2BIG;
    }
    if (r < 0) {
        if (r == -E2BIG) {
            qemu_free(cpuid);
            return NULL;
        } else {
            fprintf(stderr, "KVM_GET_SUPPORTED_CPUID failed: %s\n",
                    strerror(-r));
            exit(1);
        }
    }
    return cpuid;
}

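/*
 * Look up one register of one CPUID leaf in the KVM-supported set,
 * applying fixups for feature bits that older or Intel-hosted kernels
 * misreport (see the function 1 and 0x80000001 cases below).
 */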
uint32_t kvm_arch_get_supported_cpuid(CPUState *env, uint32_t function,
                                      uint32_t index, int reg)
{
    struct kvm_cpuid2 *cpuid;
    int i, max;
    uint32_t ret = 0;
    uint32_t cpuid_1_edx;

    if (!kvm_check_extension(env->kvm_state, KVM_CAP_EXT_CPUID)) {
        return -1U;
    }

    max = 1;
    while ((cpuid = try_get_cpuid(env->kvm_state, max)) == NULL) {
        max *= 2;
    }

    for (i = 0; i < cpuid->nent; ++i) {
        if (cpuid->entries[i].function == function &&
            cpuid->entries[i].index == index) {
            switch (reg) {
            case R_EAX:
                ret = cpuid->entries[i].eax;
                break;
            case R_EBX:
                ret = cpuid->entries[i].ebx;
                break;
            case R_ECX:
                ret = cpuid->entries[i].ecx;
                break;
            case R_EDX:
                ret = cpuid->entries[i].edx;
                switch (function) {
                case 1:
                    /* KVM before 2.6.30 misreports the following features */
                    ret |= CPUID_MTRR | CPUID_PAT | CPUID_MCE | CPUID_MCA;
                    break;
                case 0x80000001:
                    /* On Intel, kvm returns cpuid according to the Intel spec,
                     * so add missing bits according to the AMD spec:
                     */
                    cpuid_1_edx = kvm_arch_get_supported_cpuid(env, 1, 0, R_EDX);
                    ret |= cpuid_1_edx & 0x183f7ff;
                    break;
                }
                break;
            }
        }
    }

    qemu_free(cpuid);

    return ret;
}

#else

uint32_t kvm_arch_get_supported_cpuid(CPUState *env, uint32_t function,
                                      uint32_t index, int reg)
{
    return -1U;
}

#endif

#ifdef CONFIG_KVM_PARA
struct kvm_para_features {
        int cap;
        int feature;
} para_features[] = {
#ifdef KVM_CAP_CLOCKSOURCE
        { KVM_CAP_CLOCKSOURCE, KVM_FEATURE_CLOCKSOURCE },
#endif
#ifdef KVM_CAP_NOP_IO_DELAY
        { KVM_CAP_NOP_IO_DELAY, KVM_FEATURE_NOP_IO_DELAY },
#endif
#ifdef KVM_CAP_PV_MMU
        { KVM_CAP_PV_MMU, KVM_FEATURE_MMU_OP },
#endif
#ifdef KVM_CAP_ASYNC_PF
        { KVM_CAP_ASYNC_PF, KVM_FEATURE_ASYNC_PF },
#endif
        { -1, -1 }
};

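/*
 * Map host KVM capabilities to the guest-visible paravirt feature bits
 * advertised through the KVM_CPUID_FEATURES leaf.
 */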
static int get_para_features(CPUState *env)
{
        int i, features = 0;

        for (i = 0; i < ARRAY_SIZE(para_features) - 1; i++) {
                if (kvm_check_extension(env->kvm_state, para_features[i].cap))
                        features |= (1 << para_features[i].feature);
        }

        return features;
}
#endif

#ifdef KVM_CAP_MCE
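/*
 * For KVM_CAP_MCE the extension check returns the number of supported
 * MCE banks rather than a plain boolean, so pass that back along with
 * the supported MCG capability bits.
 */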
static int kvm_get_mce_cap_supported(KVMState *s, uint64_t *mce_cap,
                                     int *max_banks)
{
    int r;

    r = kvm_check_extension(s, KVM_CAP_MCE);
    if (r > 0) {
        *max_banks = r;
        return kvm_ioctl(s, KVM_X86_GET_MCE_CAP_SUPPORTED, mce_cap);
    }
    return -ENOSYS;
}

static int kvm_setup_mce(CPUState *env, uint64_t *mcg_cap)
{
    return kvm_vcpu_ioctl(env, KVM_X86_SETUP_MCE, mcg_cap);
}

static int kvm_set_mce(CPUState *env, struct kvm_x86_mce *m)
{
    return kvm_vcpu_ioctl(env, KVM_X86_SET_MCE, m);
}

static int kvm_get_msr(CPUState *env, struct kvm_msr_entry *msrs, int n)
{
    struct kvm_msrs *kmsrs = qemu_malloc(sizeof *kmsrs + n * sizeof *msrs);
    int r;

    kmsrs->nmsrs = n;
    memcpy(kmsrs->entries, msrs, n * sizeof *msrs);
    r = kvm_vcpu_ioctl(env, KVM_GET_MSRS, kmsrs);
    memcpy(msrs, kmsrs->entries, n * sizeof *msrs);
    free(kmsrs);
    return r;
}

/* FIXME: kill this and kvm_get_msr, use env->mcg_status instead */
static int kvm_mce_in_progress(CPUState *env)
{
    struct kvm_msr_entry msr_mcg_status = {
        .index = MSR_MCG_STATUS,
    };
    int r;

    r = kvm_get_msr(env, &msr_mcg_status, 1);
    if (r == -1 || r == 0) {
        fprintf(stderr, "Failed to get MCE status\n");
        return 0;
    }
    return !!(msr_mcg_status.data & MCG_STATUS_MCIP);
}

struct kvm_x86_mce_data
{
    CPUState *env;
    struct kvm_x86_mce *mce;
    int abort_on_error;
};

static void kvm_do_inject_x86_mce(void *_data)
{
    struct kvm_x86_mce_data *data = _data;
    int r;

    /* If there is an MCE exception being processed, ignore this SRAO MCE */
    if ((data->env->mcg_cap & MCG_SER_P) &&
        !(data->mce->status & MCI_STATUS_AR)) {
        if (kvm_mce_in_progress(data->env)) {
            return;
        }
    }

    r = kvm_set_mce(data->env, data->mce);
    if (r < 0) {
        perror("kvm_set_mce FAILED");
        if (data->abort_on_error) {
            abort();
        }
    }
}

static void kvm_inject_x86_mce_on(CPUState *env, struct kvm_x86_mce *mce,
                                  int flag)
{
    struct kvm_x86_mce_data data = {
        .env = env,
        .mce = mce,
        .abort_on_error = (flag & ABORT_ON_ERROR),
    };

    if (!env->mcg_cap) {
        fprintf(stderr, "MCE support is not enabled!\n");
        return;
    }

    run_on_cpu(env, kvm_do_inject_x86_mce, &data);
}

static void kvm_mce_broadcast_rest(CPUState *env);
#endif

void kvm_inject_x86_mce(CPUState *cenv, int bank, uint64_t status,
                        uint64_t mcg_status, uint64_t addr, uint64_t misc,
                        int flag)
{
#ifdef KVM_CAP_MCE
    struct kvm_x86_mce mce = {
        .bank = bank,
        .status = status,
        .mcg_status = mcg_status,
        .addr = addr,
        .misc = misc,
    };

    if (flag & MCE_BROADCAST) {
        kvm_mce_broadcast_rest(cenv);
    }

    kvm_inject_x86_mce_on(cenv, &mce, flag);
#else
    if (flag & ABORT_ON_ERROR) {
        abort();
    }
#endif
}

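/*
 * Build the complete CPUID table for one VCPU: the paravirt signature
 * leaves (under CONFIG_KVM_PARA), all standard leaves up to the limit
 * reported by leaf 0, and all extended leaves up to the 0x80000000
 * limit, then install it with KVM_SET_CPUID2.
 */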
int kvm_arch_init_vcpu(CPUState *env)
{
    struct {
        struct kvm_cpuid2 cpuid;
        struct kvm_cpuid_entry2 entries[100];
    } __attribute__((packed)) cpuid_data;
    uint32_t limit, i, j, cpuid_i;
    uint32_t unused;
    struct kvm_cpuid_entry2 *c;
#ifdef KVM_CPUID_SIGNATURE
    uint32_t signature[3];
#endif

    env->mp_state = KVM_MP_STATE_RUNNABLE;

    env->cpuid_features &= kvm_arch_get_supported_cpuid(env, 1, 0, R_EDX);

    i = env->cpuid_ext_features & CPUID_EXT_HYPERVISOR;
    env->cpuid_ext_features &= kvm_arch_get_supported_cpuid(env, 1, 0, R_ECX);
    env->cpuid_ext_features |= i;

    env->cpuid_ext2_features &= kvm_arch_get_supported_cpuid(env, 0x80000001,
                                                             0, R_EDX);
    env->cpuid_ext3_features &= kvm_arch_get_supported_cpuid(env, 0x80000001,
                                                             0, R_ECX);
    env->cpuid_svm_features  &= kvm_arch_get_supported_cpuid(env, 0x8000000A,
                                                             0, R_EDX);

    cpuid_i = 0;

#ifdef CONFIG_KVM_PARA
    /* Paravirtualization CPUIDs */
    memcpy(signature, "KVMKVMKVM\0\0\0", 12);
    c = &cpuid_data.entries[cpuid_i++];
    memset(c, 0, sizeof(*c));
    c->function = KVM_CPUID_SIGNATURE;
    c->eax = 0;
    c->ebx = signature[0];
    c->ecx = signature[1];
    c->edx = signature[2];

    c = &cpuid_data.entries[cpuid_i++];
    memset(c, 0, sizeof(*c));
    c->function = KVM_CPUID_FEATURES;
    c->eax = env->cpuid_kvm_features & get_para_features(env);
#endif

    cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused);

    for (i = 0; i <= limit; i++) {
        c = &cpuid_data.entries[cpuid_i++];

        switch (i) {
        case 2: {
            /* Keep reading function 2 until all the input is received */
            int times;

            c->function = i;
            c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC |
                       KVM_CPUID_FLAG_STATE_READ_NEXT;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            times = c->eax & 0xff;

            for (j = 1; j < times; ++j) {
                c = &cpuid_data.entries[cpuid_i++];
                c->function = i;
                c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC;
                cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            }
            break;
        }
        case 4:
        case 0xb:
        case 0xd:
            for (j = 0; ; j++) {
                c->function = i;
                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                c->index = j;
                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);

                if (i == 4 && c->eax == 0)
                    break;
                if (i == 0xb && !(c->ecx & 0xff00))
                    break;
                if (i == 0xd && c->eax == 0)
                    break;

                c = &cpuid_data.entries[cpuid_i++];
            }
            break;
        default:
            c->function = i;
            c->flags = 0;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            break;
        }
    }
    cpu_x86_cpuid(env, 0x80000000, 0, &limit, &unused, &unused, &unused);

    for (i = 0x80000000; i <= limit; i++) {
        c = &cpuid_data.entries[cpuid_i++];

        c->function = i;
        c->flags = 0;
        cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
    }

    cpuid_data.cpuid.nent = cpuid_i;

#ifdef KVM_CAP_MCE
    if (((env->cpuid_version >> 8)&0xF) >= 6
        && (env->cpuid_features&(CPUID_MCE|CPUID_MCA)) == (CPUID_MCE|CPUID_MCA)
        && kvm_check_extension(env->kvm_state, KVM_CAP_MCE) > 0) {
        uint64_t mcg_cap;
        int banks;

        if (kvm_get_mce_cap_supported(env->kvm_state, &mcg_cap, &banks))
            perror("kvm_get_mce_cap_supported FAILED");
        else {
            if (banks > MCE_BANKS_DEF)
                banks = MCE_BANKS_DEF;
            mcg_cap &= MCE_CAP_DEF;
            mcg_cap |= banks;
            if (kvm_setup_mce(env, &mcg_cap))
                perror("kvm_setup_mce FAILED");
            else
                env->mcg_cap = mcg_cap;
        }
    }
#endif

    return kvm_vcpu_ioctl(env, KVM_SET_CPUID2, &cpuid_data);
}

void kvm_arch_reset_vcpu(CPUState *env)
{
    env->exception_injected = -1;
    env->interrupt_injected = -1;
    env->nmi_injected = 0;
    env->nmi_pending = 0;
    if (kvm_irqchip_in_kernel()) {
        env->mp_state = cpu_is_bsp(env) ? KVM_MP_STATE_RUNNABLE :
                                          KVM_MP_STATE_UNINITIALIZED;
    } else {
        env->mp_state = KVM_MP_STATE_RUNNABLE;
    }
}

int has_msr_star;
int has_msr_hsave_pa;

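/*
 * KVM_GET_MSR_INDEX_LIST is a two-step protocol: a first call with
 * nmsrs = 0 fails with E2BIG but fills in the required count, and a
 * second call with a suitably sized buffer fetches the indices.
 */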
static void kvm_supported_msrs(CPUState *env)
{
    static int kvm_supported_msrs;
    int ret;

    /* first time */
    if (kvm_supported_msrs == 0) {
        struct kvm_msr_list msr_list, *kvm_msr_list;

        kvm_supported_msrs = -1;

        /* Obtain MSR list from KVM.  These are the MSRs that we must
         * save/restore */
        msr_list.nmsrs = 0;
        ret = kvm_ioctl(env->kvm_state, KVM_GET_MSR_INDEX_LIST, &msr_list);
        if (ret < 0 && ret != -E2BIG) {
            return;
        }
        /* Old kernel modules had a bug and could write beyond the provided
           memory. Allocate at least a safe amount of 1K. */
        kvm_msr_list = qemu_mallocz(MAX(1024, sizeof(msr_list) +
                                              msr_list.nmsrs *
                                              sizeof(msr_list.indices[0])));

        kvm_msr_list->nmsrs = msr_list.nmsrs;
        ret = kvm_ioctl(env->kvm_state, KVM_GET_MSR_INDEX_LIST, kvm_msr_list);
        if (ret >= 0) {
            int i;

            for (i = 0; i < kvm_msr_list->nmsrs; i++) {
                if (kvm_msr_list->indices[i] == MSR_STAR) {
                    has_msr_star = 1;
                    continue;
                }
                if (kvm_msr_list->indices[i] == MSR_VM_HSAVE_PA) {
                    has_msr_hsave_pa = 1;
                    continue;
                }
            }
        }

        free(kvm_msr_list);
    }

    return;
}

static int kvm_has_msr_hsave_pa(CPUState *env)
{
    kvm_supported_msrs(env);
    return has_msr_hsave_pa;
}

static int kvm_has_msr_star(CPUState *env)
{
    kvm_supported_msrs(env);
    return has_msr_star;
}

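/*
 * Place the EPT identity-map page at 0xfffbc000, directly below the
 * vm86 TSS pages set up in kvm_arch_init(); the combined 16K range is
 * reserved in the e820 map there.  Kernels without the capability use
 * their built-in default address.
 */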
static int kvm_init_identity_map_page(KVMState *s)
{
#ifdef KVM_CAP_SET_IDENTITY_MAP_ADDR
    int ret;
    uint64_t addr = 0xfffbc000;

    if (!kvm_check_extension(s, KVM_CAP_SET_IDENTITY_MAP_ADDR)) {
        return 0;
    }

    ret = kvm_vm_ioctl(s, KVM_SET_IDENTITY_MAP_ADDR, &addr);
    if (ret < 0) {
        fprintf(stderr, "kvm_set_identity_map_addr: %s\n", strerror(ret));
        return ret;
    }
#endif
    return 0;
}

int kvm_arch_init(KVMState *s, int smp_cpus)
{
    int ret;

    struct utsname utsname;

    uname(&utsname);
    lm_capable_kernel = strcmp(utsname.machine, "x86_64") == 0;

    /* create vm86 tss.  KVM uses vm86 mode to emulate 16-bit code
     * directly.  In order to use vm86 mode, a TSS is needed.  Since this
     * must be part of guest physical memory, we need to allocate it.  Older
     * versions of KVM just assumed that it would be at the end of physical
     * memory but that doesn't work with more than 4GB of memory.  We simply
     * refuse to work with those older versions of KVM. */
    ret = kvm_check_extension(s, KVM_CAP_SET_TSS_ADDR);
    if (ret <= 0) {
        fprintf(stderr, "kvm does not support KVM_CAP_SET_TSS_ADDR\n");
        return ret;
    }

    /* this address is 3 pages before the bios, and the bios should present
     * it as unavailable memory.  FIXME: need to ensure the e820 map deals
     * with this?
     */
    /*
     * Tell fw_cfg to notify the BIOS to reserve the range.
     */
    if (e820_add_entry(0xfffbc000, 0x4000, E820_RESERVED) < 0) {
        perror("e820_add_entry() table is full");
        exit(1);
    }
    ret = kvm_vm_ioctl(s, KVM_SET_TSS_ADDR, 0xfffbd000);
    if (ret < 0) {
        return ret;
    }

    return kvm_init_identity_map_page(s);
}

static void set_v8086_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = 3;
    lhs->present = 1;
    lhs->dpl = 3;
    lhs->db = 0;
    lhs->s = 1;
    lhs->l = 0;
    lhs->g = 0;
    lhs->avl = 0;
    lhs->unusable = 0;
}

static void set_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    unsigned flags = rhs->flags;
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = (flags >> DESC_TYPE_SHIFT) & 15;
    lhs->present = (flags & DESC_P_MASK) != 0;
    lhs->dpl = rhs->selector & 3;
    lhs->db = (flags >> DESC_B_SHIFT) & 1;
    lhs->s = (flags & DESC_S_MASK) != 0;
    lhs->l = (flags >> DESC_L_SHIFT) & 1;
    lhs->g = (flags & DESC_G_MASK) != 0;
    lhs->avl = (flags & DESC_AVL_MASK) != 0;
    lhs->unusable = 0;
}

static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->flags =
        (rhs->type << DESC_TYPE_SHIFT)
        | (rhs->present * DESC_P_MASK)
        | (rhs->dpl << DESC_DPL_SHIFT)
        | (rhs->db << DESC_B_SHIFT)
        | (rhs->s * DESC_S_MASK)
        | (rhs->l << DESC_L_SHIFT)
        | (rhs->g * DESC_G_MASK)
        | (rhs->avl * DESC_AVL_MASK);
}

static void kvm_getput_reg(__u64 *kvm_reg, target_ulong *qemu_reg, int set)
{
    if (set)
        *kvm_reg = *qemu_reg;
    else
        *qemu_reg = *kvm_reg;
}

static int kvm_getput_regs(CPUState *env, int set)
{
    struct kvm_regs regs;
    int ret = 0;

    if (!set) {
        ret = kvm_vcpu_ioctl(env, KVM_GET_REGS, &regs);
        if (ret < 0)
            return ret;
    }

    kvm_getput_reg(&regs.rax, &env->regs[R_EAX], set);
    kvm_getput_reg(&regs.rbx, &env->regs[R_EBX], set);
    kvm_getput_reg(&regs.rcx, &env->regs[R_ECX], set);
    kvm_getput_reg(&regs.rdx, &env->regs[R_EDX], set);
    kvm_getput_reg(&regs.rsi, &env->regs[R_ESI], set);
    kvm_getput_reg(&regs.rdi, &env->regs[R_EDI], set);
    kvm_getput_reg(&regs.rsp, &env->regs[R_ESP], set);
    kvm_getput_reg(&regs.rbp, &env->regs[R_EBP], set);
#ifdef TARGET_X86_64
    kvm_getput_reg(&regs.r8, &env->regs[8], set);
    kvm_getput_reg(&regs.r9, &env->regs[9], set);
    kvm_getput_reg(&regs.r10, &env->regs[10], set);
    kvm_getput_reg(&regs.r11, &env->regs[11], set);
    kvm_getput_reg(&regs.r12, &env->regs[12], set);
    kvm_getput_reg(&regs.r13, &env->regs[13], set);
    kvm_getput_reg(&regs.r14, &env->regs[14], set);
    kvm_getput_reg(&regs.r15, &env->regs[15], set);
#endif

    kvm_getput_reg(&regs.rflags, &env->eflags, set);
    kvm_getput_reg(&regs.rip, &env->eip, set);

    if (set)
        ret = kvm_vcpu_ioctl(env, KVM_SET_REGS, &regs);

    return ret;
}

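/*
 * Note that kvm_fpu.ftwx holds the abridged FXSAVE tag word, one bit
 * per x87 register with 1 = valid, which is the inverse of QEMU's
 * fptags[] convention (1 = empty); hence the negations below.
 */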
static int kvm_put_fpu(CPUState *env)
{
    struct kvm_fpu fpu;
    int i;

    memset(&fpu, 0, sizeof fpu);
    fpu.fsw = env->fpus & ~(7 << 11);
    fpu.fsw |= (env->fpstt & 7) << 11;
    fpu.fcw = env->fpuc;
    for (i = 0; i < 8; ++i)
        fpu.ftwx |= (!env->fptags[i]) << i;
    memcpy(fpu.fpr, env->fpregs, sizeof env->fpregs);
    memcpy(fpu.xmm, env->xmm_regs, sizeof env->xmm_regs);
    fpu.mxcsr = env->mxcsr;

    return kvm_vcpu_ioctl(env, KVM_SET_FPU, &fpu);
}

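/*
 * Offsets into kvm_xsave.region[], counted in 32-bit words within the
 * 4K XSAVE area (XSAVE_ST_SPACE 8 thus corresponds to byte offset 32).
 */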
#ifdef KVM_CAP_XSAVE
#define XSAVE_CWD_RIP     2
#define XSAVE_CWD_RDP     4
#define XSAVE_MXCSR       6
#define XSAVE_ST_SPACE    8
#define XSAVE_XMM_SPACE   40
#define XSAVE_XSTATE_BV   128
#define XSAVE_YMMH_SPACE  144
#endif

static int kvm_put_xsave(CPUState *env)
{
#ifdef KVM_CAP_XSAVE
    int i, r;
    struct kvm_xsave* xsave;
    uint16_t cwd, swd, twd, fop;

    if (!kvm_has_xsave())
        return kvm_put_fpu(env);

    xsave = qemu_memalign(4096, sizeof(struct kvm_xsave));
    memset(xsave, 0, sizeof(struct kvm_xsave));
    cwd = swd = twd = fop = 0;
    swd = env->fpus & ~(7 << 11);
    swd |= (env->fpstt & 7) << 11;
    cwd = env->fpuc;
    for (i = 0; i < 8; ++i)
        twd |= (!env->fptags[i]) << i;
    xsave->region[0] = (uint32_t)(swd << 16) + cwd;
    xsave->region[1] = (uint32_t)(fop << 16) + twd;
    memcpy(&xsave->region[XSAVE_ST_SPACE], env->fpregs,
            sizeof env->fpregs);
    memcpy(&xsave->region[XSAVE_XMM_SPACE], env->xmm_regs,
            sizeof env->xmm_regs);
    xsave->region[XSAVE_MXCSR] = env->mxcsr;
    *(uint64_t *)&xsave->region[XSAVE_XSTATE_BV] = env->xstate_bv;
    memcpy(&xsave->region[XSAVE_YMMH_SPACE], env->ymmh_regs,
            sizeof env->ymmh_regs);
    r = kvm_vcpu_ioctl(env, KVM_SET_XSAVE, xsave);
    qemu_free(xsave);
    return r;
#else
    return kvm_put_fpu(env);
#endif
}

static int kvm_put_xcrs(CPUState *env)
{
#ifdef KVM_CAP_XCRS
    struct kvm_xcrs xcrs;

    if (!kvm_has_xcrs())
        return 0;

    xcrs.nr_xcrs = 1;
    xcrs.flags = 0;
    xcrs.xcrs[0].xcr = 0;
    xcrs.xcrs[0].value = env->xcr0;
    return kvm_vcpu_ioctl(env, KVM_SET_XCRS, &xcrs);
#else
    return 0;
#endif
}

static int kvm_put_sregs(CPUState *env)
{
    struct kvm_sregs sregs;

    memset(sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap));
    if (env->interrupt_injected >= 0) {
        sregs.interrupt_bitmap[env->interrupt_injected / 64] |=
                (uint64_t)1 << (env->interrupt_injected % 64);
    }

    if ((env->eflags & VM_MASK)) {
            set_v8086_seg(&sregs.cs, &env->segs[R_CS]);
            set_v8086_seg(&sregs.ds, &env->segs[R_DS]);
            set_v8086_seg(&sregs.es, &env->segs[R_ES]);
            set_v8086_seg(&sregs.fs, &env->segs[R_FS]);
            set_v8086_seg(&sregs.gs, &env->segs[R_GS]);
            set_v8086_seg(&sregs.ss, &env->segs[R_SS]);
    } else {
            set_seg(&sregs.cs, &env->segs[R_CS]);
            set_seg(&sregs.ds, &env->segs[R_DS]);
            set_seg(&sregs.es, &env->segs[R_ES]);
            set_seg(&sregs.fs, &env->segs[R_FS]);
            set_seg(&sregs.gs, &env->segs[R_GS]);
            set_seg(&sregs.ss, &env->segs[R_SS]);

            if (env->cr[0] & CR0_PE_MASK) {
                /* force ss cpl to cs cpl */
                sregs.ss.selector = (sregs.ss.selector & ~3) |
                        (sregs.cs.selector & 3);
                sregs.ss.dpl = sregs.ss.selector & 3;
            }
    }

    set_seg(&sregs.tr, &env->tr);
    set_seg(&sregs.ldt, &env->ldt);

    sregs.idt.limit = env->idt.limit;
    sregs.idt.base = env->idt.base;
    sregs.gdt.limit = env->gdt.limit;
    sregs.gdt.base = env->gdt.base;

    sregs.cr0 = env->cr[0];
    sregs.cr2 = env->cr[2];
    sregs.cr3 = env->cr[3];
    sregs.cr4 = env->cr[4];

    sregs.cr8 = cpu_get_apic_tpr(env->apic_state);
    sregs.apic_base = cpu_get_apic_base(env->apic_state);

    sregs.efer = env->efer;

    return kvm_vcpu_ioctl(env, KVM_SET_SREGS, &sregs);
}

static void kvm_msr_entry_set(struct kvm_msr_entry *entry,
                              uint32_t index, uint64_t value)
{
    entry->index = index;
    entry->data = value;
}

static int kvm_put_msrs(CPUState *env, int level)
{
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[100];
    } msr_data;
    struct kvm_msr_entry *msrs = msr_data.entries;
    int n = 0;

    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_CS, env->sysenter_cs);
    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
    if (kvm_has_msr_star(env))
        kvm_msr_entry_set(&msrs[n++], MSR_STAR, env->star);
    if (kvm_has_msr_hsave_pa(env))
        kvm_msr_entry_set(&msrs[n++], MSR_VM_HSAVE_PA, env->vm_hsave);
#ifdef TARGET_X86_64
    if (lm_capable_kernel) {
        kvm_msr_entry_set(&msrs[n++], MSR_CSTAR, env->cstar);
        kvm_msr_entry_set(&msrs[n++], MSR_KERNELGSBASE, env->kernelgsbase);
        kvm_msr_entry_set(&msrs[n++], MSR_FMASK, env->fmask);
        kvm_msr_entry_set(&msrs[n++], MSR_LSTAR, env->lstar);
    }
#endif
    if (level == KVM_PUT_FULL_STATE) {
        /*
         * KVM is yet unable to synchronize TSC values of multiple VCPUs on
         * writeback. Until this is fixed, we only write the offset to SMP
         * guests after migration, desynchronizing the VCPUs, but avoiding
         * huge jump-backs that would occur without any writeback at all.
         */
        if (smp_cpus == 1 || env->tsc != 0) {
            kvm_msr_entry_set(&msrs[n++], MSR_IA32_TSC, env->tsc);
        }
        kvm_msr_entry_set(&msrs[n++], MSR_KVM_SYSTEM_TIME,
                          env->system_time_msr);
        kvm_msr_entry_set(&msrs[n++], MSR_KVM_WALL_CLOCK, env->wall_clock_msr);
#ifdef KVM_CAP_ASYNC_PF
        kvm_msr_entry_set(&msrs[n++], MSR_KVM_ASYNC_PF_EN, env->async_pf_en_msr);
#endif
    }
#ifdef KVM_CAP_MCE
    if (env->mcg_cap) {
        int i;
        if (level == KVM_PUT_RESET_STATE)
            kvm_msr_entry_set(&msrs[n++], MSR_MCG_STATUS, env->mcg_status);
        else if (level == KVM_PUT_FULL_STATE) {
            kvm_msr_entry_set(&msrs[n++], MSR_MCG_STATUS, env->mcg_status);
            kvm_msr_entry_set(&msrs[n++], MSR_MCG_CTL, env->mcg_ctl);
            for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++)
                kvm_msr_entry_set(&msrs[n++], MSR_MC0_CTL + i, env->mce_banks[i]);
        }
    }
#endif

    msr_data.info.nmsrs = n;

    return kvm_vcpu_ioctl(env, KVM_SET_MSRS, &msr_data);
}

static int kvm_get_fpu(CPUState *env)
{
    struct kvm_fpu fpu;
    int i, ret;

    ret = kvm_vcpu_ioctl(env, KVM_GET_FPU, &fpu);
    if (ret < 0)
        return ret;

    env->fpstt = (fpu.fsw >> 11) & 7;
    env->fpus = fpu.fsw;
    env->fpuc = fpu.fcw;
    for (i = 0; i < 8; ++i)
        env->fptags[i] = !((fpu.ftwx >> i) & 1);
    memcpy(env->fpregs, fpu.fpr, sizeof env->fpregs);
    memcpy(env->xmm_regs, fpu.xmm, sizeof env->xmm_regs);
    env->mxcsr = fpu.mxcsr;

    return 0;
}

static int kvm_get_xsave(CPUState *env)
{
#ifdef KVM_CAP_XSAVE
    struct kvm_xsave* xsave;
    int ret, i;
    uint16_t cwd, swd, twd, fop;

    if (!kvm_has_xsave())
        return kvm_get_fpu(env);

    xsave = qemu_memalign(4096, sizeof(struct kvm_xsave));
    ret = kvm_vcpu_ioctl(env, KVM_GET_XSAVE, xsave);
    if (ret < 0) {
        qemu_free(xsave);
        return ret;
    }

    cwd = (uint16_t)xsave->region[0];
    swd = (uint16_t)(xsave->region[0] >> 16);
    twd = (uint16_t)xsave->region[1];
    fop = (uint16_t)(xsave->region[1] >> 16);
    env->fpstt = (swd >> 11) & 7;
    env->fpus = swd;
    env->fpuc = cwd;
    for (i = 0; i < 8; ++i)
        env->fptags[i] = !((twd >> i) & 1);
    env->mxcsr = xsave->region[XSAVE_MXCSR];
    memcpy(env->fpregs, &xsave->region[XSAVE_ST_SPACE],
            sizeof env->fpregs);
    memcpy(env->xmm_regs, &xsave->region[XSAVE_XMM_SPACE],
            sizeof env->xmm_regs);
    env->xstate_bv = *(uint64_t *)&xsave->region[XSAVE_XSTATE_BV];
    memcpy(env->ymmh_regs, &xsave->region[XSAVE_YMMH_SPACE],
            sizeof env->ymmh_regs);
    qemu_free(xsave);
    return 0;
#else
    return kvm_get_fpu(env);
#endif
}

static int kvm_get_xcrs(CPUState *env)
{
#ifdef KVM_CAP_XCRS
    int i, ret;
    struct kvm_xcrs xcrs;

    if (!kvm_has_xcrs())
        return 0;

    ret = kvm_vcpu_ioctl(env, KVM_GET_XCRS, &xcrs);
    if (ret < 0)
        return ret;

    for (i = 0; i < xcrs.nr_xcrs; i++)
        /* Only support xcr0 now */
        if (xcrs.xcrs[0].xcr == 0) {
            env->xcr0 = xcrs.xcrs[0].value;
            break;
        }
    return 0;
#else
    return 0;
#endif
}

static int kvm_get_sregs(CPUState *env)
{
    struct kvm_sregs sregs;
    uint32_t hflags;
    int bit, i, ret;

    ret = kvm_vcpu_ioctl(env, KVM_GET_SREGS, &sregs);
    if (ret < 0)
        return ret;

    /* There can only be one pending IRQ set in the bitmap at a time, so try
       to find it and save its number instead (-1 for none). */
    env->interrupt_injected = -1;
    for (i = 0; i < ARRAY_SIZE(sregs.interrupt_bitmap); i++) {
        if (sregs.interrupt_bitmap[i]) {
            bit = ctz64(sregs.interrupt_bitmap[i]);
            env->interrupt_injected = i * 64 + bit;
            break;
        }
    }

    get_seg(&env->segs[R_CS], &sregs.cs);
    get_seg(&env->segs[R_DS], &sregs.ds);
    get_seg(&env->segs[R_ES], &sregs.es);
    get_seg(&env->segs[R_FS], &sregs.fs);
    get_seg(&env->segs[R_GS], &sregs.gs);
    get_seg(&env->segs[R_SS], &sregs.ss);

    get_seg(&env->tr, &sregs.tr);
    get_seg(&env->ldt, &sregs.ldt);

    env->idt.limit = sregs.idt.limit;
    env->idt.base = sregs.idt.base;
    env->gdt.limit = sregs.gdt.limit;
    env->gdt.base = sregs.gdt.base;

    env->cr[0] = sregs.cr0;
    env->cr[2] = sregs.cr2;
    env->cr[3] = sregs.cr3;
    env->cr[4] = sregs.cr4;

    cpu_set_apic_base(env->apic_state, sregs.apic_base);

    env->efer = sregs.efer;
    //cpu_set_apic_tpr(env->apic_state, sregs.cr8);

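/*
 * Recompute QEMU's cached hflags from the segment and control register
 * state just read back: CPL, the CR0/CR4-derived bits, long-mode and
 * default operand-size flags, and the ADDSEG shortcut all live in
 * env->hflags.
 */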
#define HFLAG_COPY_MASK ~( \
                        HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
                        HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
                        HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
                        HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)

    hflags = (env->segs[R_CS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
    hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
    hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
            (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
    hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
    hflags |= (env->cr[4] & CR4_OSFXSR_MASK) <<
            (HF_OSFXSR_SHIFT - CR4_OSFXSR_SHIFT);

    if (env->efer & MSR_EFER_LMA) {
        hflags |= HF_LMA_MASK;
    }

    if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
        hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
    } else {
        hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
                (DESC_B_SHIFT - HF_CS32_SHIFT);
        hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
                (DESC_B_SHIFT - HF_SS32_SHIFT);
        if (!(env->cr[0] & CR0_PE_MASK) ||
            (env->eflags & VM_MASK) ||
            !(hflags & HF_CS32_MASK)) {
            hflags |= HF_ADDSEG_MASK;
        } else {
            hflags |= ((env->segs[R_DS].base |
                        env->segs[R_ES].base |
                        env->segs[R_SS].base) != 0) <<
                HF_ADDSEG_SHIFT;
        }
    }
    env->hflags = (env->hflags & HFLAG_COPY_MASK) | hflags;

    return 0;
}

static int kvm_get_msrs(CPUState *env)
{
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[100];
    } msr_data;
    struct kvm_msr_entry *msrs = msr_data.entries;
    int ret, i, n;

    n = 0;
    msrs[n++].index = MSR_IA32_SYSENTER_CS;
    msrs[n++].index = MSR_IA32_SYSENTER_ESP;
    msrs[n++].index = MSR_IA32_SYSENTER_EIP;
    if (kvm_has_msr_star(env))
        msrs[n++].index = MSR_STAR;
    if (kvm_has_msr_hsave_pa(env))
        msrs[n++].index = MSR_VM_HSAVE_PA;
    msrs[n++].index = MSR_IA32_TSC;
#ifdef TARGET_X86_64
    if (lm_capable_kernel) {
        msrs[n++].index = MSR_CSTAR;
        msrs[n++].index = MSR_KERNELGSBASE;
        msrs[n++].index = MSR_FMASK;
        msrs[n++].index = MSR_LSTAR;
    }
#endif
    msrs[n++].index = MSR_KVM_SYSTEM_TIME;
    msrs[n++].index = MSR_KVM_WALL_CLOCK;
#ifdef KVM_CAP_ASYNC_PF
    msrs[n++].index = MSR_KVM_ASYNC_PF_EN;
#endif

#ifdef KVM_CAP_MCE
    if (env->mcg_cap) {
        msrs[n++].index = MSR_MCG_STATUS;
        msrs[n++].index = MSR_MCG_CTL;
        for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++)
            msrs[n++].index = MSR_MC0_CTL + i;
    }
#endif

    msr_data.info.nmsrs = n;
    ret = kvm_vcpu_ioctl(env, KVM_GET_MSRS, &msr_data);
    if (ret < 0)
        return ret;

    for (i = 0; i < ret; i++) {
        switch (msrs[i].index) {
        case MSR_IA32_SYSENTER_CS:
            env->sysenter_cs = msrs[i].data;
            break;
        case MSR_IA32_SYSENTER_ESP:
            env->sysenter_esp = msrs[i].data;
            break;
        case MSR_IA32_SYSENTER_EIP:
            env->sysenter_eip = msrs[i].data;
            break;
        case MSR_STAR:
            env->star = msrs[i].data;
            break;
#ifdef TARGET_X86_64
        case MSR_CSTAR:
            env->cstar = msrs[i].data;
            break;
        case MSR_KERNELGSBASE:
            env->kernelgsbase = msrs[i].data;
            break;
        case MSR_FMASK:
            env->fmask = msrs[i].data;
            break;
        case MSR_LSTAR:
            env->lstar = msrs[i].data;
            break;
#endif
        case MSR_IA32_TSC:
            env->tsc = msrs[i].data;
            break;
        case MSR_VM_HSAVE_PA:
            env->vm_hsave = msrs[i].data;
            break;
        case MSR_KVM_SYSTEM_TIME:
            env->system_time_msr = msrs[i].data;
            break;
        case MSR_KVM_WALL_CLOCK:
            env->wall_clock_msr = msrs[i].data;
            break;
#ifdef KVM_CAP_MCE
        case MSR_MCG_STATUS:
            env->mcg_status = msrs[i].data;
            break;
        case MSR_MCG_CTL:
            env->mcg_ctl = msrs[i].data;
            break;
#endif
        default:
#ifdef KVM_CAP_MCE
            if (msrs[i].index >= MSR_MC0_CTL &&
                msrs[i].index < MSR_MC0_CTL + (env->mcg_cap & 0xff) * 4) {
                env->mce_banks[msrs[i].index - MSR_MC0_CTL] = msrs[i].data;
            }
#endif
            break;
#ifdef KVM_CAP_ASYNC_PF
        case MSR_KVM_ASYNC_PF_EN:
            env->async_pf_en_msr = msrs[i].data;
            break;
#endif
        }
    }

    return 0;
}

static int kvm_put_mp_state(CPUState *env)
{
    struct kvm_mp_state mp_state = { .mp_state = env->mp_state };

    return kvm_vcpu_ioctl(env, KVM_SET_MP_STATE, &mp_state);
}

static int kvm_get_mp_state(CPUState *env)
{
    struct kvm_mp_state mp_state;
    int ret;

    ret = kvm_vcpu_ioctl(env, KVM_GET_MP_STATE, &mp_state);
    if (ret < 0) {
        return ret;
    }
    env->mp_state = mp_state.mp_state;
    return 0;
}

static int kvm_put_vcpu_events(CPUState *env, int level)
{
#ifdef KVM_CAP_VCPU_EVENTS
    struct kvm_vcpu_events events;

    if (!kvm_has_vcpu_events()) {
        return 0;
    }

    events.exception.injected = (env->exception_injected >= 0);
    events.exception.nr = env->exception_injected;
    events.exception.has_error_code = env->has_error_code;
    events.exception.error_code = env->error_code;

    events.interrupt.injected = (env->interrupt_injected >= 0);
    events.interrupt.nr = env->interrupt_injected;
    events.interrupt.soft = env->soft_interrupt;

    events.nmi.injected = env->nmi_injected;
    events.nmi.pending = env->nmi_pending;
    events.nmi.masked = !!(env->hflags2 & HF2_NMI_MASK);

    events.sipi_vector = env->sipi_vector;

    events.flags = 0;
    if (level >= KVM_PUT_RESET_STATE) {
        events.flags |=
            KVM_VCPUEVENT_VALID_NMI_PENDING | KVM_VCPUEVENT_VALID_SIPI_VECTOR;
    }

    return kvm_vcpu_ioctl(env, KVM_SET_VCPU_EVENTS, &events);
#else
    return 0;
#endif
}

static int kvm_get_vcpu_events(CPUState *env)
{
#ifdef KVM_CAP_VCPU_EVENTS
    struct kvm_vcpu_events events;
    int ret;

    if (!kvm_has_vcpu_events()) {
        return 0;
    }

    ret = kvm_vcpu_ioctl(env, KVM_GET_VCPU_EVENTS, &events);
    if (ret < 0) {
       return ret;
    }
    env->exception_injected =
       events.exception.injected ? events.exception.nr : -1;
    env->has_error_code = events.exception.has_error_code;
    env->error_code = events.exception.error_code;

    env->interrupt_injected =
        events.interrupt.injected ? events.interrupt.nr : -1;
    env->soft_interrupt = events.interrupt.soft;

    env->nmi_injected = events.nmi.injected;
    env->nmi_pending = events.nmi.pending;
    if (events.nmi.masked) {
        env->hflags2 |= HF2_NMI_MASK;
    } else {
        env->hflags2 &= ~HF2_NMI_MASK;
    }

    env->sipi_vector = events.sipi_vector;
#endif

    return 0;
}

static int kvm_guest_debug_workarounds(CPUState *env)
{
    int ret = 0;
#ifdef KVM_CAP_SET_GUEST_DEBUG
    unsigned long reinject_trap = 0;

    if (!kvm_has_vcpu_events()) {
        if (env->exception_injected == 1) {
            reinject_trap = KVM_GUESTDBG_INJECT_DB;
        } else if (env->exception_injected == 3) {
            reinject_trap = KVM_GUESTDBG_INJECT_BP;
        }
        env->exception_injected = -1;
    }

    /*
     * Kernels before KVM_CAP_X86_ROBUST_SINGLESTEP overwrote flags.TF
     * injected via SET_GUEST_DEBUG while updating GP regs. Work around this
     * by updating the debug state once again if single-stepping is on.
     * Another reason to call kvm_update_guest_debug here is a pending debug
     * trap raised by the guest. On kernels without SET_VCPU_EVENTS we have to
     * reinject them via SET_GUEST_DEBUG.
     */
    if (reinject_trap ||
        (!kvm_has_robust_singlestep() && env->singlestep_enabled)) {
        ret = kvm_update_guest_debug(env, reinject_trap);
    }
#endif /* KVM_CAP_SET_GUEST_DEBUG */
    return ret;
}

static int kvm_put_debugregs(CPUState *env)
{
#ifdef KVM_CAP_DEBUGREGS
    struct kvm_debugregs dbgregs;
    int i;

    if (!kvm_has_debugregs()) {
        return 0;
    }

    for (i = 0; i < 4; i++) {
        dbgregs.db[i] = env->dr[i];
    }
    dbgregs.dr6 = env->dr[6];
    dbgregs.dr7 = env->dr[7];
    dbgregs.flags = 0;

    return kvm_vcpu_ioctl(env, KVM_SET_DEBUGREGS, &dbgregs);
#else
    return 0;
#endif
}

static int kvm_get_debugregs(CPUState *env)
{
#ifdef KVM_CAP_DEBUGREGS
    struct kvm_debugregs dbgregs;
    int i, ret;

    if (!kvm_has_debugregs()) {
        return 0;
    }

    ret = kvm_vcpu_ioctl(env, KVM_GET_DEBUGREGS, &dbgregs);
    if (ret < 0) {
       return ret;
    }
    for (i = 0; i < 4; i++) {
        env->dr[i] = dbgregs.db[i];
    }
    env->dr[4] = env->dr[6] = dbgregs.dr6;
    env->dr[5] = env->dr[7] = dbgregs.dr7;
#endif

    return 0;
}

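/*
 * Push all vcpu state to the kernel.  Ordering matters here; in
 * particular the guest-debug workaround is flagged below as needing to
 * run after the other state transfers.
 */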
int kvm_arch_put_registers(CPUState *env, int level)
{
    int ret;

    assert(cpu_is_stopped(env) || qemu_cpu_self(env));

    ret = kvm_getput_regs(env, 1);
    if (ret < 0)
        return ret;

    ret = kvm_put_xsave(env);
    if (ret < 0)
        return ret;

    ret = kvm_put_xcrs(env);
    if (ret < 0)
        return ret;

    ret = kvm_put_sregs(env);
    if (ret < 0)
        return ret;

    ret = kvm_put_msrs(env, level);
    if (ret < 0)
        return ret;

    if (level >= KVM_PUT_RESET_STATE) {
        ret = kvm_put_mp_state(env);
        if (ret < 0)
            return ret;
    }

    ret = kvm_put_vcpu_events(env, level);
    if (ret < 0)
        return ret;

    /* must be last */
    ret = kvm_guest_debug_workarounds(env);
    if (ret < 0)
        return ret;

    ret = kvm_put_debugregs(env);
    if (ret < 0)
        return ret;

    return 0;
}

int kvm_arch_get_registers(CPUState *env)
{
    int ret;

    assert(cpu_is_stopped(env) || qemu_cpu_self(env));

    ret = kvm_getput_regs(env, 0);
    if (ret < 0)
        return ret;

    ret = kvm_get_xsave(env);
    if (ret < 0)
        return ret;

    ret = kvm_get_xcrs(env);
    if (ret < 0)
        return ret;

    ret = kvm_get_sregs(env);
    if (ret < 0)
        return ret;

    ret = kvm_get_msrs(env);
    if (ret < 0)
        return ret;

    ret = kvm_get_mp_state(env);
    if (ret < 0)
        return ret;

    ret = kvm_get_vcpu_events(env);
    if (ret < 0)
        return ret;

    ret = kvm_get_debugregs(env);
    if (ret < 0)
        return ret;

    return 0;
}

int kvm_arch_pre_run(CPUState *env, struct kvm_run *run)
{
    /* Inject NMI */
    if (env->interrupt_request & CPU_INTERRUPT_NMI) {
        env->interrupt_request &= ~CPU_INTERRUPT_NMI;
        DPRINTF("injected NMI\n");
        kvm_vcpu_ioctl(env, KVM_NMI);
    }

    /* Try to inject an interrupt if the guest can accept it */
    if (run->ready_for_interrupt_injection &&
        (env->interrupt_request & CPU_INTERRUPT_HARD) &&
        (env->eflags & IF_MASK)) {
        int irq;

        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
        irq = cpu_get_pic_interrupt(env);
        if (irq >= 0) {
            struct kvm_interrupt intr;
            intr.irq = irq;
            /* FIXME: errors */
            DPRINTF("injected interrupt %d\n", irq);
            kvm_vcpu_ioctl(env, KVM_INTERRUPT, &intr);
        }
    }

    /* If we have an interrupt but the guest is not ready to receive an
     * interrupt, request an interrupt window exit.  This will
     * cause a return to userspace as soon as the guest is ready to
     * receive interrupts. */
    if ((env->interrupt_request & CPU_INTERRUPT_HARD))
        run->request_interrupt_window = 1;
    else
        run->request_interrupt_window = 0;

    DPRINTF("setting tpr\n");
    run->cr8 = cpu_get_apic_tpr(env->apic_state);

    return 0;
}

int kvm_arch_post_run(CPUState *env, struct kvm_run *run)
{
    if (run->if_flag)
        env->eflags |= IF_MASK;
    else
        env->eflags &= ~IF_MASK;

    cpu_set_apic_tpr(env->apic_state, run->cr8);
    cpu_set_apic_base(env->apic_state, run->apic_base);

    return 0;
}

int kvm_arch_process_irqchip_events(CPUState *env)
{
    if (env->interrupt_request & CPU_INTERRUPT_INIT) {
        kvm_cpu_synchronize_state(env);
        do_cpu_init(env);
        env->exception_index = EXCP_HALTED;
    }

    if (env->interrupt_request & CPU_INTERRUPT_SIPI) {
        kvm_cpu_synchronize_state(env);
        do_cpu_sipi(env);
    }

    return env->halted;
}

static int kvm_handle_halt(CPUState *env)
{
    if (!((env->interrupt_request & CPU_INTERRUPT_HARD) &&
          (env->eflags & IF_MASK)) &&
        !(env->interrupt_request & CPU_INTERRUPT_NMI)) {
        env->halted = 1;
        env->exception_index = EXCP_HLT;
        return 0;
    }

    return 1;
}

int kvm_arch_handle_exit(CPUState *env, struct kvm_run *run)
{
    int ret = 0;

    switch (run->exit_reason) {
    case KVM_EXIT_HLT:
        DPRINTF("handle_hlt\n");
        ret = kvm_handle_halt(env);
        break;
    }

    return ret;
}

#ifdef KVM_CAP_SET_GUEST_DEBUG
int kvm_arch_insert_sw_breakpoint(CPUState *env, struct kvm_sw_breakpoint *bp)
{
    static const uint8_t int3 = 0xcc;

    if (cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&bp->saved_insn, 1, 0) ||
        cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&int3, 1, 1))
        return -EINVAL;
    return 0;
}

int kvm_arch_remove_sw_breakpoint(CPUState *env, struct kvm_sw_breakpoint *bp)
{
    uint8_t int3;

    if (cpu_memory_rw_debug(env, bp->pc, &int3, 1, 0) || int3 != 0xcc ||
        cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&bp->saved_insn, 1, 1))
        return -EINVAL;
    return 0;
}

static struct {
    target_ulong addr;
    int len;
    int type;
} hw_breakpoint[4];

static int nb_hw_breakpoint;

static int find_hw_breakpoint(target_ulong addr, int len, int type)
{
    int n;

    for (n = 0; n < nb_hw_breakpoint; n++)
        if (hw_breakpoint[n].addr == addr && hw_breakpoint[n].type == type &&
            (hw_breakpoint[n].len == len || len == -1))
            return n;
    return -1;
}

int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        len = 1;
        break;
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        switch (len) {
        case 1:
            break;
        case 2:
        case 4:
        case 8:
            if (addr & (len - 1))
                return -EINVAL;
            break;
        default:
            return -EINVAL;
        }
        break;
    default:
        return -ENOSYS;
    }

    if (nb_hw_breakpoint == 4)
        return -ENOBUFS;

    if (find_hw_breakpoint(addr, len, type) >= 0)
        return -EEXIST;

    hw_breakpoint[nb_hw_breakpoint].addr = addr;
    hw_breakpoint[nb_hw_breakpoint].len = len;
    hw_breakpoint[nb_hw_breakpoint].type = type;
    nb_hw_breakpoint++;

    return 0;
}

int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    int n;

    n = find_hw_breakpoint(addr, (type == GDB_BREAKPOINT_HW) ? 1 : len, type);
    if (n < 0)
        return -ENOENT;

    nb_hw_breakpoint--;
    hw_breakpoint[n] = hw_breakpoint[nb_hw_breakpoint];

    return 0;
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    nb_hw_breakpoint = 0;
}

static CPUWatchpoint hw_watchpoint;

int kvm_arch_debug(struct kvm_debug_exit_arch *arch_info)
{
    int handle = 0;
    int n;

    if (arch_info->exception == 1) {
        if (arch_info->dr6 & (1 << 14)) {
            if (cpu_single_env->singlestep_enabled)
                handle = 1;
        } else {
            for (n = 0; n < 4; n++)
                if (arch_info->dr6 & (1 << n))
                    switch ((arch_info->dr7 >> (16 + n*4)) & 0x3) {
                    case 0x0:
                        handle = 1;
                        break;
                    case 0x1:
                        handle = 1;
                        cpu_single_env->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_WRITE;
                        break;
                    case 0x3:
                        handle = 1;
                        cpu_single_env->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_ACCESS;
                        break;
                    }
        }
    } else if (kvm_find_sw_breakpoint(cpu_single_env, arch_info->pc))
        handle = 1;

    if (!handle) {
        cpu_synchronize_state(cpu_single_env);
        assert(cpu_single_env->exception_injected == -1);

        cpu_single_env->exception_injected = arch_info->exception;
        cpu_single_env->has_error_code = 0;
    }

    return handle;
}

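/*
 * Encode the breakpoint list into DR7: 0x0600 sets the LE/GE exact
 * bits, 2 << (n * 2) sets the global-enable bit for slot n, and the
 * R/W type and LEN fields for slot n sit at bits 16 + n*4 and
 * 18 + n*4 (taken from type_code[] and len_code[]).
 */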
void kvm_arch_update_guest_debug(CPUState *env, struct kvm_guest_debug *dbg)
{
    const uint8_t type_code[] = {
        [GDB_BREAKPOINT_HW] = 0x0,
        [GDB_WATCHPOINT_WRITE] = 0x1,
        [GDB_WATCHPOINT_ACCESS] = 0x3
    };
    const uint8_t len_code[] = {
        [1] = 0x0, [2] = 0x1, [4] = 0x3, [8] = 0x2
    };
    int n;

    if (kvm_sw_breakpoints_active(env))
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;

    if (nb_hw_breakpoint > 0) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
        dbg->arch.debugreg[7] = 0x0600;
        for (n = 0; n < nb_hw_breakpoint; n++) {
            dbg->arch.debugreg[n] = hw_breakpoint[n].addr;
            dbg->arch.debugreg[7] |= (2 << (n * 2)) |
                (type_code[hw_breakpoint[n].type] << (16 + n*4)) |
                (len_code[hw_breakpoint[n].len] << (18 + n*4));
        }
    }
    /* Legal xcr0 for loading */
    env->xcr0 = 1;
}
#endif /* KVM_CAP_SET_GUEST_DEBUG */

bool kvm_arch_stop_on_emulation_error(CPUState *env)
{
    return !(env->cr[0] & CR0_PE_MASK) ||
           ((env->segs[R_CS].selector & 3) != 3);
}

static void hardware_memory_error(void)
{
    fprintf(stderr, "Hardware memory error!\n");
    exit(1);
}

#ifdef KVM_CAP_MCE
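/*
 * Fan a machine-check event out to every other VCPU as a bank 1
 * uncorrected-error signal, gated on cpu_x86_support_mca_broadcast()
 * so that only processor models that broadcast MCA do this.
 */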
static void kvm_mce_broadcast_rest(CPUState *env)
{
    struct kvm_x86_mce mce = {
        .bank = 1,
        .status = MCI_STATUS_VAL | MCI_STATUS_UC,
        .mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV,
        .addr = 0,
        .misc = 0,
    };
    CPUState *cenv;

    /* Broadcast MCA signal for processor version 06H_EH and above */
    if (cpu_x86_support_mca_broadcast(env)) {
        for (cenv = first_cpu; cenv != NULL; cenv = cenv->next_cpu) {
            if (cenv == env) {
                continue;
            }
            kvm_inject_x86_mce_on(cenv, &mce, ABORT_ON_ERROR);
        }
    }
}

static void kvm_mce_inj_srar_dataload(CPUState *env, target_phys_addr_t paddr)
{
    struct kvm_x86_mce mce = {
        .bank = 9,
        .status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN
                  | MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S
                  | MCI_STATUS_AR | 0x134,
        .mcg_status = MCG_STATUS_MCIP | MCG_STATUS_EIPV,
        .addr = paddr,
        .misc = (MCM_ADDR_PHYS << 6) | 0xc,
    };
    int r;

    r = kvm_set_mce(env, &mce);
    if (r < 0) {
        fprintf(stderr, "kvm_set_mce: %s\n", strerror(errno));
        abort();
    }
    kvm_mce_broadcast_rest(env);
}

static void kvm_mce_inj_srao_memscrub(CPUState *env, target_phys_addr_t paddr)
{
    struct kvm_x86_mce mce = {
        .bank = 9,
        .status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN
                  | MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S
                  | 0xc0,
        .mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV,
        .addr = paddr,
        .misc = (MCM_ADDR_PHYS << 6) | 0xc,
    };
    int r;

    r = kvm_set_mce(env, &mce);
    if (r < 0) {
        fprintf(stderr, "kvm_set_mce: %s\n", strerror(errno));
        abort();
    }
    kvm_mce_broadcast_rest(env);
}

static void kvm_mce_inj_srao_memscrub2(CPUState *env, target_phys_addr_t paddr)
{
    struct kvm_x86_mce mce = {
        .bank = 9,
        .status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN
                  | MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S
                  | 0xc0,
        .mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV,
        .addr = paddr,
        .misc = (MCM_ADDR_PHYS << 6) | 0xc,
    };

    kvm_inject_x86_mce_on(env, &mce, ABORT_ON_ERROR);
    kvm_mce_broadcast_rest(env);
}

#endif

int kvm_on_sigbus_vcpu(CPUState *env, int code, void *addr)
{
#if defined(KVM_CAP_MCE)
    void *vaddr;
    ram_addr_t ram_addr;
    target_phys_addr_t paddr;

    if ((env->mcg_cap & MCG_SER_P) && addr
        && (code == BUS_MCEERR_AR
            || code == BUS_MCEERR_AO)) {
        vaddr = (void *)addr;
        if (qemu_ram_addr_from_host(vaddr, &ram_addr) ||
            !kvm_physical_memory_addr_from_ram(env->kvm_state, ram_addr, &paddr)) {
            fprintf(stderr, "Hardware memory error for memory used by "
                    "QEMU itself instead of guest system!\n");
            /* Hope we are lucky for AO MCE */
            if (code == BUS_MCEERR_AO) {
                return 0;
            } else {
                hardware_memory_error();
            }
        }

        if (code == BUS_MCEERR_AR) {
            /* Fake an Intel architectural Data Load SRAR UCR */
            kvm_mce_inj_srar_dataload(env, paddr);
        } else {
            /*
             * If there is an MCE exception being processed, ignore
             * this SRAO MCE
             */
            if (!kvm_mce_in_progress(env)) {
                /* Fake an Intel architectural Memory scrubbing UCR */
                kvm_mce_inj_srao_memscrub(env, paddr);
            }
        }
    } else
#endif
    {
        if (code == BUS_MCEERR_AO) {
            return 0;
        } else if (code == BUS_MCEERR_AR) {
            hardware_memory_error();
        } else {
            return 1;
        }
    }
    return 0;
}

int kvm_on_sigbus(int code, void *addr)
{
#if defined(KVM_CAP_MCE)
    if ((first_cpu->mcg_cap & MCG_SER_P) && addr && code == BUS_MCEERR_AO) {
        void *vaddr;
        ram_addr_t ram_addr;
        target_phys_addr_t paddr;

        /* Hope we are lucky for AO MCE */
        vaddr = addr;
        if (qemu_ram_addr_from_host(vaddr, &ram_addr) ||
            !kvm_physical_memory_addr_from_ram(first_cpu->kvm_state, ram_addr, &paddr)) {
            fprintf(stderr, "Hardware memory error for memory used by "
                    "QEMU itself instead of guest system!: %p\n", addr);
            return 0;
        }
        kvm_mce_inj_srao_memscrub2(first_cpu, paddr);
    } else
#endif
    {
        if (code == BUS_MCEERR_AO) {
            return 0;
        } else if (code == BUS_MCEERR_AR) {
            hardware_memory_error();
        } else {
            return 1;
        }
    }
    return 0;
}