kvm-all.c @ f8bfb1dc

/*
 * QEMU KVM support
 *
 * Copyright IBM, Corp. 2008
 *           Red Hat, Inc. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *  Glauber Costa     <gcosta@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <stdarg.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "sysemu.h"
#include "hw/hw.h"
#include "gdbstub.h"
#include "kvm.h"

/* KVM uses PAGE_SIZE in its definition of COALESCED_MMIO_MAX */
#define PAGE_SIZE TARGET_PAGE_SIZE

//#define DEBUG_KVM

#ifdef DEBUG_KVM
#define dprintf(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define dprintf(fmt, ...) \
    do { } while (0)
#endif

typedef struct KVMSlot
{
    target_phys_addr_t start_addr;
    ram_addr_t memory_size;
    ram_addr_t phys_offset;
    int slot;
    int flags;
} KVMSlot;

typedef struct kvm_dirty_log KVMDirtyLog;

int kvm_allowed = 0;

struct KVMState
{
    KVMSlot slots[32];
    int fd;
    int vmfd;
    int coalesced_mmio;
    int broken_set_mem_region;
    int migration_log;
#ifdef KVM_CAP_SET_GUEST_DEBUG
    struct kvm_sw_breakpoint_head kvm_sw_breakpoints;
#endif
    int irqchip_in_kernel;
    int pit_in_kernel;
};

static KVMState *kvm_state;

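/* Return the first unused memory slot, skipping the range this KVM version
 * reserves for its own use (slots 8 to 11); aborts if all slots are taken. */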
static KVMSlot *kvm_alloc_slot(KVMState *s)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        /* KVM private memory slots */
        if (i >= 8 && i < 12)
            continue;
        if (s->slots[i].memory_size == 0)
            return &s->slots[i];
    }

    fprintf(stderr, "%s: no free slot available\n", __func__);
    abort();
}

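/* Return the slot that covers exactly [start_addr, end_addr), or NULL if
 * no slot matches these bounds precisely. */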
static KVMSlot *kvm_lookup_matching_slot(KVMState *s,
                                         target_phys_addr_t start_addr,
                                         target_phys_addr_t end_addr)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        KVMSlot *mem = &s->slots[i];

        if (start_addr == mem->start_addr &&
            end_addr == mem->start_addr + mem->memory_size) {
            return mem;
        }
    }

    return NULL;
}

/*
 * Find overlapping slot with lowest start address
 */
static KVMSlot *kvm_lookup_overlapping_slot(KVMState *s,
                                            target_phys_addr_t start_addr,
                                            target_phys_addr_t end_addr)
{
    KVMSlot *found = NULL;
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        KVMSlot *mem = &s->slots[i];

        if (mem->memory_size == 0 ||
            (found && found->start_addr < mem->start_addr)) {
            continue;
        }

        if (end_addr > mem->start_addr &&
            start_addr < mem->start_addr + mem->memory_size) {
            found = mem;
        }
    }

    return found;
}

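/* Tell the kernel about one slot's layout: translate the KVMSlot into a
 * struct kvm_userspace_memory_region and issue KVM_SET_USER_MEMORY_REGION.
 * Dirty-page logging is forced on while migration logging is active. */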
static int kvm_set_user_memory_region(KVMState *s, KVMSlot *slot)
{
    struct kvm_userspace_memory_region mem;

    mem.slot = slot->slot;
    mem.guest_phys_addr = slot->start_addr;
    mem.memory_size = slot->memory_size;
    mem.userspace_addr = (unsigned long)qemu_get_ram_ptr(slot->phys_offset);
    mem.flags = slot->flags;
    if (s->migration_log) {
        mem.flags |= KVM_MEM_LOG_DIRTY_PAGES;
    }
    return kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
}

static void kvm_reset_vcpu(void *opaque)
{
    CPUState *env = opaque;

    if (kvm_arch_put_registers(env)) {
        fprintf(stderr, "Fatal: kvm vcpu reset failed\n");
        abort();
    }
}

int kvm_irqchip_in_kernel(void)
{
    return kvm_state->irqchip_in_kernel;
}

int kvm_pit_in_kernel(void)
{
    return kvm_state->pit_in_kernel;
}

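/* Create a vcpu for @env with KVM_CREATE_VCPU, mmap the kvm_run structure
 * shared with the kernel, run the per-arch vcpu setup and register a reset
 * handler that re-uploads the register state on system reset. */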
int kvm_init_vcpu(CPUState *env)
{
    KVMState *s = kvm_state;
    long mmap_size;
    int ret;

    dprintf("kvm_init_vcpu\n");

    ret = kvm_vm_ioctl(s, KVM_CREATE_VCPU, env->cpu_index);
    if (ret < 0) {
        dprintf("kvm_create_vcpu failed\n");
        goto err;
    }

    env->kvm_fd = ret;
    env->kvm_state = s;

    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
    if (mmap_size < 0) {
        dprintf("KVM_GET_VCPU_MMAP_SIZE failed\n");
        goto err;
    }

    env->kvm_run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                        env->kvm_fd, 0);
    if (env->kvm_run == MAP_FAILED) {
        ret = -errno;
        dprintf("mmap'ing vcpu state failed\n");
        goto err;
    }

    ret = kvm_arch_init_vcpu(env);
    if (ret == 0) {
        qemu_register_reset(kvm_reset_vcpu, env);
        ret = kvm_arch_put_registers(env);
    }
err:
    return ret;
}

int kvm_put_mp_state(CPUState *env)
{
    struct kvm_mp_state mp_state = { .mp_state = env->mp_state };

    return kvm_vcpu_ioctl(env, KVM_SET_MP_STATE, &mp_state);
}

int kvm_get_mp_state(CPUState *env)
{
    struct kvm_mp_state mp_state;
    int ret;

    ret = kvm_vcpu_ioctl(env, KVM_GET_MP_STATE, &mp_state);
    if (ret < 0) {
        return ret;
    }
    env->mp_state = mp_state.mp_state;
    return 0;
}

/*
 * dirty pages logging control
 */
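/* Update the dirty-logging flags of the slot that exactly covers
 * [phys_addr, phys_addr + size): the bits selected by @mask are replaced by
 * @flags.  The kernel is only told if the effective flags actually changed. */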
static int kvm_dirty_pages_log_change(target_phys_addr_t phys_addr,
                                      ram_addr_t size, int flags, int mask)
{
    KVMState *s = kvm_state;
    KVMSlot *mem = kvm_lookup_matching_slot(s, phys_addr, phys_addr + size);
    int old_flags;

    if (mem == NULL)  {
            fprintf(stderr, "BUG: %s: invalid parameters " TARGET_FMT_plx "-"
                    TARGET_FMT_plx "\n", __func__, phys_addr,
                    (target_phys_addr_t)(phys_addr + size - 1));
            return -EINVAL;
    }

    old_flags = mem->flags;

    flags = (mem->flags & ~mask) | flags;
    mem->flags = flags;

    /* If nothing changed effectively, no need to issue ioctl */
    if (s->migration_log) {
        flags |= KVM_MEM_LOG_DIRTY_PAGES;
    }
    if (flags == old_flags) {
            return 0;
    }

    return kvm_set_user_memory_region(s, mem);
}

int kvm_log_start(target_phys_addr_t phys_addr, ram_addr_t size)
{
        return kvm_dirty_pages_log_change(phys_addr, size,
                                          KVM_MEM_LOG_DIRTY_PAGES,
                                          KVM_MEM_LOG_DIRTY_PAGES);
}

int kvm_log_stop(target_phys_addr_t phys_addr, ram_addr_t size)
{
        return kvm_dirty_pages_log_change(phys_addr, size,
                                          0,
                                          KVM_MEM_LOG_DIRTY_PAGES);
}

int kvm_set_migration_log(int enable)
{
    KVMState *s = kvm_state;
    KVMSlot *mem;
    int i, err;

    s->migration_log = enable;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        mem = &s->slots[i];

        if (!!(mem->flags & KVM_MEM_LOG_DIRTY_PAGES) == enable) {
            continue;
        }
        err = kvm_set_user_memory_region(s, mem);
        if (err) {
            return err;
        }
    }
    return 0;
}

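/* Test bit @nr in a little-endian bitmap, the layout KVM_GET_DIRTY_LOG uses
 * for its per-slot dirty bitmap. */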
static int test_le_bit(unsigned long nr, unsigned char *addr)
{
    return (addr[nr >> 3] >> (nr & 7)) & 1;
}

/**
 * kvm_physical_sync_dirty_bitmap - Grab dirty bitmap from kernel space
 * This function updates qemu's dirty bitmap using cpu_physical_memory_set_dirty(),
 * i.e. a page reported dirty by the kernel gets all of its dirty flag bits set.
 *
 * @start_addr: start of logged region.
 * @end_addr: end of logged region.
 */
int kvm_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
                                   target_phys_addr_t end_addr)
{
    KVMState *s = kvm_state;
    unsigned long size, allocated_size = 0;
    target_phys_addr_t phys_addr;
    ram_addr_t addr;
    KVMDirtyLog d;
    KVMSlot *mem;
    int ret = 0;
    int r;

    d.dirty_bitmap = NULL;
    while (start_addr < end_addr) {
        mem = kvm_lookup_overlapping_slot(s, start_addr, end_addr);
        if (mem == NULL) {
            break;
        }

        /* We didn't activate dirty logging for this slot? Then skip past it,
         * otherwise we would keep hitting the same slot forever. */
        if (!(mem->flags & KVM_MEM_LOG_DIRTY_PAGES)) {
            start_addr = mem->start_addr + mem->memory_size;
            continue;
        }

        size = ((mem->memory_size >> TARGET_PAGE_BITS) + 7) / 8;
        if (!d.dirty_bitmap) {
            d.dirty_bitmap = qemu_malloc(size);
        } else if (size > allocated_size) {
            d.dirty_bitmap = qemu_realloc(d.dirty_bitmap, size);
        }
        allocated_size = size;
        memset(d.dirty_bitmap, 0, allocated_size);

        d.slot = mem->slot;

        r = kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d);
        if (r == -EINVAL) {
            dprintf("ioctl failed %d\n", errno);
            ret = -1;
            break;
        }

        for (phys_addr = mem->start_addr, addr = mem->phys_offset;
             phys_addr < mem->start_addr + mem->memory_size;
             phys_addr += TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
            unsigned char *bitmap = (unsigned char *)d.dirty_bitmap;
            unsigned nr = (phys_addr - mem->start_addr) >> TARGET_PAGE_BITS;

            if (test_le_bit(nr, bitmap)) {
                cpu_physical_memory_set_dirty(addr);
            } else if (r < 0) {
                /* When our KVM implementation doesn't know about dirty logging
                 * we can just assume it's always dirty and be fine. */
                cpu_physical_memory_set_dirty(addr);
            }
        }
        start_addr = phys_addr;
    }
    qemu_free(d.dirty_bitmap);

    return ret;
}

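/* Ask the kernel to coalesce MMIO writes to [start, start + size): instead of
 * exiting to userspace on every access, writes are queued in the shared
 * coalesced-MMIO ring and replayed later.  Returns -ENOSYS if unsupported. */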
int kvm_coalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
{
    int ret = -ENOSYS;
#ifdef KVM_CAP_COALESCED_MMIO
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;

        ret = kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
    }
#endif

    return ret;
}

int kvm_uncoalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
{
    int ret = -ENOSYS;
#ifdef KVM_CAP_COALESCED_MMIO
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;

        ret = kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
    }
#endif

    return ret;
}

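/* Probe a KVM capability.  Returns 0 if the extension is unsupported,
 * otherwise the (positive) value reported by KVM_CHECK_EXTENSION. */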
int kvm_check_extension(KVMState *s, unsigned int extension)
{
    int ret;

    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, extension);
    if (ret < 0) {
        ret = 0;
    }

    return ret;
}

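/* One-time KVM setup: open /dev/kvm, verify the API version, check the
 * capabilities we depend on, create the VM file descriptor and run the
 * arch-specific initialization. */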
int kvm_init(int smp_cpus)
{
    static const char upgrade_note[] =
        "Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\n"
        "(see http://sourceforge.net/projects/kvm).\n";
    KVMState *s;
    int ret;
    int i;

    if (smp_cpus > 1) {
        fprintf(stderr, "No SMP KVM support, use '-smp 1'\n");
        return -EINVAL;
    }

    s = qemu_mallocz(sizeof(KVMState));

#ifdef KVM_CAP_SET_GUEST_DEBUG
    TAILQ_INIT(&s->kvm_sw_breakpoints);
#endif
    for (i = 0; i < ARRAY_SIZE(s->slots); i++)
        s->slots[i].slot = i;

    s->vmfd = -1;
    s->fd = open("/dev/kvm", O_RDWR);
    if (s->fd == -1) {
        fprintf(stderr, "Could not access KVM kernel module: %m\n");
        ret = -errno;
        goto err;
    }

    ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0);
    if (ret < KVM_API_VERSION) {
        if (ret > 0)
            ret = -EINVAL;
        fprintf(stderr, "kvm version too old\n");
        goto err;
    }

    if (ret > KVM_API_VERSION) {
        ret = -EINVAL;
        fprintf(stderr, "kvm version not supported\n");
        goto err;
    }

    s->vmfd = kvm_ioctl(s, KVM_CREATE_VM, 0);
    if (s->vmfd < 0)
        goto err;

    /* initially, KVM allocated its own memory and we had to jump through
     * hoops to make phys_ram_base point to this.  Modern versions of KVM
     * just use a user allocated buffer so we can use regular pages
     * unmodified.  Make sure we have a sufficiently modern version of KVM.
     */
    if (!kvm_check_extension(s, KVM_CAP_USER_MEMORY)) {
        ret = -EINVAL;
        fprintf(stderr, "kvm does not support KVM_CAP_USER_MEMORY\n%s",
                upgrade_note);
        goto err;
    }

    /* There was a nasty bug in < kvm-80 that prevents memory slots from being
     * destroyed properly.  Since we rely on this capability, refuse to work
     * with any kernel without this capability. */
    if (!kvm_check_extension(s, KVM_CAP_DESTROY_MEMORY_REGION_WORKS)) {
        ret = -EINVAL;

        fprintf(stderr,
                "KVM kernel module broken (DESTROY_MEMORY_REGION).\n%s",
                upgrade_note);
        goto err;
    }

#ifdef KVM_CAP_COALESCED_MMIO
    s->coalesced_mmio = kvm_check_extension(s, KVM_CAP_COALESCED_MMIO);
#else
    s->coalesced_mmio = 0;
#endif

    s->broken_set_mem_region = 1;
#ifdef KVM_CAP_JOIN_MEMORY_REGIONS_WORKS
    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, KVM_CAP_JOIN_MEMORY_REGIONS_WORKS);
    if (ret > 0) {
        s->broken_set_mem_region = 0;
    }
#endif

    ret = kvm_arch_init(s, smp_cpus);
    if (ret < 0)
        goto err;

    kvm_state = s;

    return 0;

err:
    if (s) {
        if (s->vmfd != -1)
            close(s->vmfd);
        if (s->fd != -1)
            close(s->fd);
    }
    qemu_free(s);

    return ret;
}

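/* Complete a KVM_EXIT_IO request: replay each element of the (possibly
 * repeated) port I/O access against QEMU's ioport handlers, filling or
 * consuming the data buffer shared with the kernel. */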
static int kvm_handle_io(CPUState *env, uint16_t port, void *data,
                         int direction, int size, uint32_t count)
{
    int i;
    uint8_t *ptr = data;

    for (i = 0; i < count; i++) {
        if (direction == KVM_EXIT_IO_IN) {
            switch (size) {
            case 1:
                stb_p(ptr, cpu_inb(env, port));
                break;
            case 2:
                stw_p(ptr, cpu_inw(env, port));
                break;
            case 4:
                stl_p(ptr, cpu_inl(env, port));
                break;
            }
        } else {
            switch (size) {
            case 1:
                cpu_outb(env, port, ldub_p(ptr));
                break;
            case 2:
                cpu_outw(env, port, lduw_p(ptr));
                break;
            case 4:
                cpu_outl(env, port, ldl_p(ptr));
                break;
            }
        }

        ptr += size;
    }

    return 1;
}

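/* Drain the coalesced-MMIO ring shared with the kernel and replay every
 * queued write through cpu_physical_memory_write(). */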
static void kvm_run_coalesced_mmio(CPUState *env, struct kvm_run *run)
{
#ifdef KVM_CAP_COALESCED_MMIO
    KVMState *s = kvm_state;
    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_ring *ring;

        ring = (void *)run + (s->coalesced_mmio * TARGET_PAGE_SIZE);
        while (ring->first != ring->last) {
            struct kvm_coalesced_mmio *ent;

            ent = &ring->coalesced_mmio[ring->first];

            cpu_physical_memory_write(ent->phys_addr, ent->data, ent->len);
            /* FIXME smp_wmb() */
            ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
        }
    }
#endif
}

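/* Main vcpu execution loop: keep issuing KVM_RUN and dispatching the
 * resulting exit reasons until an exit occurs that has to be handled
 * outside this loop (I/O window, debug stop, pending exit request, ...). */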
int kvm_cpu_exec(CPUState *env)
{
    struct kvm_run *run = env->kvm_run;
    int ret;

    dprintf("kvm_cpu_exec()\n");

    do {
        if (env->exit_request) {
            dprintf("interrupt exit requested\n");
            ret = 0;
            break;
        }

        kvm_arch_pre_run(env, run);
        ret = kvm_vcpu_ioctl(env, KVM_RUN, 0);
        kvm_arch_post_run(env, run);

        if (ret == -EINTR || ret == -EAGAIN) {
            dprintf("io window exit\n");
            ret = 0;
            break;
        }

        if (ret < 0) {
            dprintf("kvm run failed %s\n", strerror(-ret));
            abort();
        }

        kvm_run_coalesced_mmio(env, run);

        ret = 0; /* exit loop */
        switch (run->exit_reason) {
        case KVM_EXIT_IO:
            dprintf("handle_io\n");
            ret = kvm_handle_io(env, run->io.port,
                                (uint8_t *)run + run->io.data_offset,
                                run->io.direction,
                                run->io.size,
                                run->io.count);
            break;
        case KVM_EXIT_MMIO:
            dprintf("handle_mmio\n");
            cpu_physical_memory_rw(run->mmio.phys_addr,
                                   run->mmio.data,
                                   run->mmio.len,
                                   run->mmio.is_write);
            ret = 1;
            break;
        case KVM_EXIT_IRQ_WINDOW_OPEN:
            dprintf("irq_window_open\n");
            break;
        case KVM_EXIT_SHUTDOWN:
            dprintf("shutdown\n");
            qemu_system_reset_request();
            ret = 1;
            break;
        case KVM_EXIT_UNKNOWN:
            dprintf("kvm_exit_unknown\n");
            break;
        case KVM_EXIT_FAIL_ENTRY:
            dprintf("kvm_exit_fail_entry\n");
            break;
        case KVM_EXIT_EXCEPTION:
            dprintf("kvm_exit_exception\n");
            break;
        case KVM_EXIT_DEBUG:
            dprintf("kvm_exit_debug\n");
#ifdef KVM_CAP_SET_GUEST_DEBUG
            if (kvm_arch_debug(&run->debug.arch)) {
                gdb_set_stop_cpu(env);
                vm_stop(EXCP_DEBUG);
                env->exception_index = EXCP_DEBUG;
                return 0;
            }
            /* re-enter, this exception was guest-internal */
            ret = 1;
#endif /* KVM_CAP_SET_GUEST_DEBUG */
            break;
        default:
            dprintf("kvm_arch_handle_exit\n");
            ret = kvm_arch_handle_exit(env, run);
            break;
        }
    } while (ret > 0);

    if (env->exit_request) {
        env->exit_request = 0;
        env->exception_index = EXCP_INTERRUPT;
    }

    return ret;
}

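/* (Re)register a guest-physical memory range with KVM.  Existing overlapping
 * slots are unregistered first and, where needed, re-registered as prefix and
 * suffix slots around the new region; regions KVM does not need to know about
 * (unassigned or pure MMIO) are skipped. */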
void kvm_set_phys_mem(target_phys_addr_t start_addr,
                      ram_addr_t size,
                      ram_addr_t phys_offset)
{
    KVMState *s = kvm_state;
    ram_addr_t flags = phys_offset & ~TARGET_PAGE_MASK;
    KVMSlot *mem, old;
    int err;

    if (start_addr & ~TARGET_PAGE_MASK) {
        if (flags >= IO_MEM_UNASSIGNED) {
            if (!kvm_lookup_overlapping_slot(s, start_addr,
                                             start_addr + size)) {
                return;
            }
            fprintf(stderr, "Unaligned split of a KVM memory slot\n");
        } else {
            fprintf(stderr, "Only page-aligned memory slots supported\n");
        }
        abort();
    }

    /* KVM does not support read-only slots */
    phys_offset &= ~IO_MEM_ROM;

    while (1) {
        mem = kvm_lookup_overlapping_slot(s, start_addr, start_addr + size);
        if (!mem) {
            break;
        }

        if (flags < IO_MEM_UNASSIGNED && start_addr >= mem->start_addr &&
            (start_addr + size <= mem->start_addr + mem->memory_size) &&
            (phys_offset - start_addr == mem->phys_offset - mem->start_addr)) {
            /* The new slot fits into the existing one and comes with
             * identical parameters - nothing to be done. */
            return;
        }

        old = *mem;

        /* unregister the overlapping slot */
        mem->memory_size = 0;
        err = kvm_set_user_memory_region(s, mem);
        if (err) {
            fprintf(stderr, "%s: error unregistering overlapping slot: %s\n",
                    __func__, strerror(-err));
            abort();
        }

        /* Workaround for older KVM versions: we can't join slots, even not by
         * unregistering the previous ones and then registering the larger
         * slot. We have to maintain the existing fragmentation. Sigh.
         *
         * This workaround assumes that the new slot starts at the same
         * address as the first existing one. If not or if some overlapping
         * slot comes around later, we will fail (not seen in practice so far)
         * - and actually require a recent KVM version. */
        if (s->broken_set_mem_region &&
            old.start_addr == start_addr && old.memory_size < size &&
            flags < IO_MEM_UNASSIGNED) {
            mem = kvm_alloc_slot(s);
            mem->memory_size = old.memory_size;
            mem->start_addr = old.start_addr;
            mem->phys_offset = old.phys_offset;
            mem->flags = 0;

            err = kvm_set_user_memory_region(s, mem);
            if (err) {
                fprintf(stderr, "%s: error updating slot: %s\n", __func__,
                        strerror(-err));
                abort();
            }

            start_addr += old.memory_size;
            phys_offset += old.memory_size;
            size -= old.memory_size;
            continue;
        }

        /* register prefix slot */
        if (old.start_addr < start_addr) {
            mem = kvm_alloc_slot(s);
            mem->memory_size = start_addr - old.start_addr;
            mem->start_addr = old.start_addr;
            mem->phys_offset = old.phys_offset;
            mem->flags = 0;

            err = kvm_set_user_memory_region(s, mem);
            if (err) {
                fprintf(stderr, "%s: error registering prefix slot: %s\n",
                        __func__, strerror(-err));
                abort();
            }
        }

        /* register suffix slot */
        if (old.start_addr + old.memory_size > start_addr + size) {
            ram_addr_t size_delta;

            mem = kvm_alloc_slot(s);
            mem->start_addr = start_addr + size;
            size_delta = mem->start_addr - old.start_addr;
            mem->memory_size = old.memory_size - size_delta;
            mem->phys_offset = old.phys_offset + size_delta;
            mem->flags = 0;

            err = kvm_set_user_memory_region(s, mem);
            if (err) {
                fprintf(stderr, "%s: error registering suffix slot: %s\n",
                        __func__, strerror(-err));
                abort();
            }
        }
    }

    /* in case the KVM bug workaround already "consumed" the new slot */
    if (!size)
        return;

    /* KVM does not need to know about this memory */
    if (flags >= IO_MEM_UNASSIGNED)
        return;

    mem = kvm_alloc_slot(s);
    mem->memory_size = size;
    mem->start_addr = start_addr;
    mem->phys_offset = phys_offset;
    mem->flags = 0;

    err = kvm_set_user_memory_region(s, mem);
    if (err) {
        fprintf(stderr, "%s: error registering slot: %s\n", __func__,
                strerror(-err));
        abort();
    }
}

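/* Thin wrappers around ioctl() for the three KVM file descriptors (system,
 * VM and vcpu).  Each takes an optional pointer argument and converts the
 * usual -1/errno result into a negative errno return value, e.g.
 * kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem). */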
int kvm_ioctl(KVMState *s, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    ret = ioctl(s->fd, type, arg);
    if (ret == -1)
        ret = -errno;

    return ret;
}

int kvm_vm_ioctl(KVMState *s, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    ret = ioctl(s->vmfd, type, arg);
    if (ret == -1)
        ret = -errno;

    return ret;
}

int kvm_vcpu_ioctl(CPUState *env, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    ret = ioctl(env->kvm_fd, type, arg);
    if (ret == -1)
        ret = -errno;

    return ret;
}

int kvm_has_sync_mmu(void)
{
#ifdef KVM_CAP_SYNC_MMU
    KVMState *s = kvm_state;

    return kvm_check_extension(s, KVM_CAP_SYNC_MMU);
#else
    return 0;
#endif
}

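/* Without a synchronous KVM MMU, guest RAM must not be duplicated by
 * fork(), so mark it MADV_DONTFORK; if that is impossible, bail out. */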
void kvm_setup_guest_memory(void *start, size_t size)
{
    if (!kvm_has_sync_mmu()) {
#ifdef MADV_DONTFORK
        int ret = madvise(start, size, MADV_DONTFORK);

        if (ret) {
            perror("madvise");
            exit(1);
        }
#else
        fprintf(stderr,
                "Need MADV_DONTFORK in absence of synchronous KVM MMU\n");
        exit(1);
#endif
    }
}

#ifdef KVM_CAP_SET_GUEST_DEBUG
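/* Run @func(@data) on the given vcpu.  In this single-threaded setup only
 * the currently executing CPU is supported; anything else is a bug. */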
static void on_vcpu(CPUState *env, void (*func)(void *data), void *data)
{
    if (env == cpu_single_env) {
        func(data);
        return;
    }
    abort();
}

struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *env,
                                                 target_ulong pc)
{
    struct kvm_sw_breakpoint *bp;

    TAILQ_FOREACH(bp, &env->kvm_state->kvm_sw_breakpoints, entry) {
        if (bp->pc == pc)
            return bp;
    }
    return NULL;
}

int kvm_sw_breakpoints_active(CPUState *env)
{
    return !TAILQ_EMPTY(&env->kvm_state->kvm_sw_breakpoints);
}

struct kvm_set_guest_debug_data {
    struct kvm_guest_debug dbg;
    CPUState *env;
    int err;
};

static void kvm_invoke_set_guest_debug(void *data)
{
    struct kvm_set_guest_debug_data *dbg_data = data;
    dbg_data->err = kvm_vcpu_ioctl(dbg_data->env, KVM_SET_GUEST_DEBUG, &dbg_data->dbg);
}

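/* Push the current guest-debug state (single-stepping, breakpoints and an
 * optional trap to reinject) to the kernel via KVM_SET_GUEST_DEBUG, executed
 * in the context of the vcpu itself. */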
int kvm_update_guest_debug(CPUState *env, unsigned long reinject_trap)
{
    struct kvm_set_guest_debug_data data;

    data.dbg.control = 0;
    if (env->singlestep_enabled)
        data.dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;

    kvm_arch_update_guest_debug(env, &data.dbg);
    data.dbg.control |= reinject_trap;
    data.env = env;

    on_vcpu(env, kvm_invoke_set_guest_debug, &data);
    return data.err;
}

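/* Insert a software or hardware breakpoint.  Software breakpoints are
 * reference-counted and kept on a per-VM list; afterwards the debug state
 * of every vcpu is refreshed. */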
int kvm_insert_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    struct kvm_sw_breakpoint *bp;
    CPUState *env;
    int err;

    if (type == GDB_BREAKPOINT_SW) {
        bp = kvm_find_sw_breakpoint(current_env, addr);
        if (bp) {
            bp->use_count++;
            return 0;
        }

        bp = qemu_malloc(sizeof(struct kvm_sw_breakpoint));
        if (!bp)
            return -ENOMEM;

        bp->pc = addr;
        bp->use_count = 1;
        err = kvm_arch_insert_sw_breakpoint(current_env, bp);
        if (err) {
            free(bp);
            return err;
        }

        TAILQ_INSERT_HEAD(&current_env->kvm_state->kvm_sw_breakpoints,
                          bp, entry);
    } else {
        err = kvm_arch_insert_hw_breakpoint(addr, len, type);
        if (err)
            return err;
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        err = kvm_update_guest_debug(env, 0);
        if (err)
            return err;
    }
    return 0;
}

int kvm_remove_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    struct kvm_sw_breakpoint *bp;
    CPUState *env;
    int err;

    if (type == GDB_BREAKPOINT_SW) {
        bp = kvm_find_sw_breakpoint(current_env, addr);
        if (!bp)
            return -ENOENT;

        if (bp->use_count > 1) {
            bp->use_count--;
            return 0;
        }

        err = kvm_arch_remove_sw_breakpoint(current_env, bp);
        if (err)
            return err;

        TAILQ_REMOVE(&current_env->kvm_state->kvm_sw_breakpoints, bp, entry);
        qemu_free(bp);
    } else {
        err = kvm_arch_remove_hw_breakpoint(addr, len, type);
        if (err)
            return err;
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        err = kvm_update_guest_debug(env, 0);
        if (err)
            return err;
    }
    return 0;
}

void kvm_remove_all_breakpoints(CPUState *current_env)
{
    struct kvm_sw_breakpoint *bp, *next;
    KVMState *s = current_env->kvm_state;
    CPUState *env;

    TAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) {
        if (kvm_arch_remove_sw_breakpoint(current_env, bp) != 0) {
            /* Try harder to find a CPU that currently sees the breakpoint. */
            for (env = first_cpu; env != NULL; env = env->next_cpu) {
                if (kvm_arch_remove_sw_breakpoint(env, bp) == 0)
                    break;
            }
        }
    }
    kvm_arch_remove_all_hw_breakpoints();

    for (env = first_cpu; env != NULL; env = env->next_cpu)
        kvm_update_guest_debug(env, 0);
}

#else /* !KVM_CAP_SET_GUEST_DEBUG */

int kvm_update_guest_debug(CPUState *env, unsigned long reinject_trap)
{
    return -EINVAL;
}

int kvm_insert_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    return -EINVAL;
}

int kvm_remove_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    return -EINVAL;
}

void kvm_remove_all_breakpoints(CPUState *current_env)
{
}
#endif /* !KVM_CAP_SET_GUEST_DEBUG */