
/* kvm-all.c @ e22a25c9 */

/*
 * QEMU KVM support
 *
 * Copyright IBM, Corp. 2008
 *           Red Hat, Inc. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *  Glauber Costa     <gcosta@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <stdarg.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "sysemu.h"
#include "gdbstub.h"
#include "kvm.h"

/* KVM uses PAGE_SIZE in its definition of COALESCED_MMIO_MAX */
#define PAGE_SIZE TARGET_PAGE_SIZE

//#define DEBUG_KVM

#ifdef DEBUG_KVM
#define dprintf(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define dprintf(fmt, ...) \
    do { } while (0)
#endif

typedef struct KVMSlot
{
    target_phys_addr_t start_addr;
    ram_addr_t memory_size;
    ram_addr_t phys_offset;
    int slot;
    int flags;
} KVMSlot;

typedef struct kvm_dirty_log KVMDirtyLog;

int kvm_allowed = 0;

struct KVMState
{
    KVMSlot slots[32];
    int fd;
    int vmfd;
    int coalesced_mmio;
#ifdef KVM_CAP_SET_GUEST_DEBUG
    struct kvm_sw_breakpoint_head kvm_sw_breakpoints;
#endif
};

static KVMState *kvm_state;
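
/* Grab the first free slot, skipping slots 8-11, which KVM keeps for its own
 * private memory regions. Returns NULL if every usable slot is taken. */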
static KVMSlot *kvm_alloc_slot(KVMState *s)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        /* KVM private memory slots */
        if (i >= 8 && i < 12)
            continue;
        if (s->slots[i].memory_size == 0)
            return &s->slots[i];
    }

    return NULL;
}
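
/* Return the slot that contains the guest physical address start_addr, or
 * NULL if no registered slot covers it. */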
static KVMSlot *kvm_lookup_slot(KVMState *s, target_phys_addr_t start_addr)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        KVMSlot *mem = &s->slots[i];

        if (start_addr >= mem->start_addr &&
            start_addr < (mem->start_addr + mem->memory_size))
            return mem;
    }

    return NULL;
}
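
/* Push one slot's current state to the kernel via KVM_SET_USER_MEMORY_REGION.
 * A memory_size of zero unregisters the slot. */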
static int kvm_set_user_memory_region(KVMState *s, KVMSlot *slot)
{
    struct kvm_userspace_memory_region mem;

    mem.slot = slot->slot;
    mem.guest_phys_addr = slot->start_addr;
    mem.memory_size = slot->memory_size;
    mem.userspace_addr = (unsigned long)phys_ram_base + slot->phys_offset;
    mem.flags = slot->flags;

    return kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
}
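
/* Create a kernel vcpu for env and map its shared kvm_run area. Called once
 * per CPU at machine init time. */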
int kvm_init_vcpu(CPUState *env)
{
    KVMState *s = kvm_state;
    long mmap_size;
    int ret;

    dprintf("kvm_init_vcpu\n");

    ret = kvm_vm_ioctl(s, KVM_CREATE_VCPU, env->cpu_index);
    if (ret < 0) {
        dprintf("kvm_create_vcpu failed\n");
        goto err;
    }

    env->kvm_fd = ret;
    env->kvm_state = s;

    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
    if (mmap_size < 0) {
        dprintf("KVM_GET_VCPU_MMAP_SIZE failed\n");
        goto err;
    }

    env->kvm_run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                        env->kvm_fd, 0);
    if (env->kvm_run == MAP_FAILED) {
        ret = -errno;
        dprintf("mmap'ing vcpu state failed\n");
        goto err;
    }

    ret = kvm_arch_init_vcpu(env);

err:
    return ret;
}
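
/* Flush QEMU's copy of every CPU's register state back to the kernel. */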
int kvm_sync_vcpus(void)
{
    CPUState *env;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        int ret;

        ret = kvm_arch_put_registers(env);
        if (ret)
            return ret;
    }

    return 0;
}

/*
 * dirty pages logging control
 */
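
/* Update the logging flags of the slot containing phys_addr: clear the bits
 * in mask, or in the new flags, and reissue the memory-region ioctl only if
 * the result differs from the slot's current flags. */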
static int kvm_dirty_pages_log_change(target_phys_addr_t phys_addr,
                                      target_phys_addr_t end_addr,
                                      unsigned flags,
                                      unsigned mask)
{
    KVMState *s = kvm_state;
    KVMSlot *mem = kvm_lookup_slot(s, phys_addr);

    if (mem == NULL) {
        dprintf("invalid parameters %llx-%llx\n", phys_addr, end_addr);
        return -EINVAL;
    }

    flags = (mem->flags & ~mask) | flags;
    /* Nothing changed, no need to issue ioctl */
    if (flags == mem->flags)
        return 0;

    mem->flags = flags;

    return kvm_set_user_memory_region(s, mem);
}

int kvm_log_start(target_phys_addr_t phys_addr, target_phys_addr_t end_addr)
{
    return kvm_dirty_pages_log_change(phys_addr, end_addr,
                                      KVM_MEM_LOG_DIRTY_PAGES,
                                      KVM_MEM_LOG_DIRTY_PAGES);
}

int kvm_log_stop(target_phys_addr_t phys_addr, target_phys_addr_t end_addr)
{
    return kvm_dirty_pages_log_change(phys_addr, end_addr,
                                      0,
                                      KVM_MEM_LOG_DIRTY_PAGES);
}

/**
 * kvm_physical_sync_dirty_bitmap - Grab dirty bitmap from kernel space
 * This function updates QEMU's dirty bitmap using cpu_physical_memory_set_dirty():
 * every page the kernel reports as dirty is marked dirty in QEMU's bitmap.
 *
 * @start_addr: start of logged region. This is what we use to search the memslot.
 * @end_addr: end of logged region.
 */
void kvm_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr)
{
    KVMState *s = kvm_state;
    KVMDirtyLog d;
    KVMSlot *mem = kvm_lookup_slot(s, start_addr);
    unsigned long alloc_size;
    ram_addr_t addr;
    target_phys_addr_t phys_addr = start_addr;

    if (mem == NULL) {
        fprintf(stderr, "BUG: %s: invalid parameters\n", __func__);
        return;
    }
    dprintf("sync addr: %llx into %lx\n", start_addr, mem->phys_offset);

    /* one bit per page, rounded up to a whole number of longs: the kernel
     * copies the log out in long-sized chunks */
    alloc_size = ((mem->memory_size >> TARGET_PAGE_BITS) +
                  sizeof(unsigned long) * 8 - 1) /
                 (sizeof(unsigned long) * 8) * sizeof(unsigned long);
    d.dirty_bitmap = qemu_mallocz(alloc_size);

    d.slot = mem->slot;
    dprintf("slot %d, phys_addr %llx, uaddr: %llx\n",
            d.slot, mem->start_addr, mem->phys_offset);

    if (kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d) == -1) {
        dprintf("ioctl failed %d\n", errno);
        goto out;
    }

    phys_addr = start_addr;
    for (addr = mem->phys_offset; phys_addr < end_addr;
         phys_addr += TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        unsigned long *bitmap = (unsigned long *)d.dirty_bitmap;
        unsigned nr = (phys_addr - start_addr) >> TARGET_PAGE_BITS;
        unsigned word = nr / (sizeof(*bitmap) * 8);
        unsigned bit = nr % (sizeof(*bitmap) * 8);

        if ((bitmap[word] >> bit) & 1)
            cpu_physical_memory_set_dirty(addr);
    }
out:
    qemu_free(d.dirty_bitmap);
}
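
/* Ask the kernel to coalesce guest writes to the MMIO range
 * [start, start+size): instead of exiting to userspace on every write, KVM
 * queues them in a ring that kvm_run_coalesced_mmio() drains later. */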
int kvm_coalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
{
    int ret = -ENOSYS;
#ifdef KVM_CAP_COALESCED_MMIO
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;

        ret = kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
    }
#endif

    return ret;
}

int kvm_uncoalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
{
    int ret = -ENOSYS;
#ifdef KVM_CAP_COALESCED_MMIO
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;

        ret = kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
    }
#endif

    return ret;
}
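
/* Open /dev/kvm, check the API version and the capabilities we depend on,
 * create the VM and run the per-arch setup. Returns 0 on success, a negative
 * errno on failure. */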
int kvm_init(int smp_cpus)
{
    KVMState *s;
    int ret;
    int i;

    if (smp_cpus > 1)
        return -EINVAL;

    s = qemu_mallocz(sizeof(KVMState));

#ifdef KVM_CAP_SET_GUEST_DEBUG
    TAILQ_INIT(&s->kvm_sw_breakpoints);
#endif
    for (i = 0; i < ARRAY_SIZE(s->slots); i++)
        s->slots[i].slot = i;

    s->vmfd = -1;
    s->fd = open("/dev/kvm", O_RDWR);
    if (s->fd == -1) {
        fprintf(stderr, "Could not access KVM kernel module: %m\n");
        ret = -errno;
        goto err;
    }

    ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0);
    if (ret < KVM_API_VERSION) {
        if (ret > 0)
            ret = -EINVAL;
        fprintf(stderr, "kvm version too old\n");
        goto err;
    }

    if (ret > KVM_API_VERSION) {
        ret = -EINVAL;
        fprintf(stderr, "kvm version not supported\n");
        goto err;
    }

    s->vmfd = kvm_ioctl(s, KVM_CREATE_VM, 0);
    if (s->vmfd < 0)
        goto err;

    /* Initially, KVM allocated its own memory and we had to jump through
     * hoops to make phys_ram_base point to this.  Modern versions of KVM
     * just use a user-allocated buffer, so we can use phys_ram_base
     * unmodified.  Make sure we have a sufficiently modern version of KVM.
     */
    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, KVM_CAP_USER_MEMORY);
    if (ret <= 0) {
        if (ret == 0)
            ret = -EINVAL;
        fprintf(stderr, "kvm does not support KVM_CAP_USER_MEMORY\n");
        goto err;
    }

    /* There was a nasty bug in < kvm-80 that prevented memory slots from
     * being destroyed properly.  Since we rely on this capability, refuse
     * to work with any kernel that lacks it. */
    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION,
                    KVM_CAP_DESTROY_MEMORY_REGION_WORKS);
    if (ret <= 0) {
        if (ret == 0)
            ret = -EINVAL;

        fprintf(stderr,
                "KVM kernel module broken (DESTROY_MEMORY_REGION)\n"
                "Please upgrade to at least kvm-81.\n");
        goto err;
    }

    s->coalesced_mmio = 0;
#ifdef KVM_CAP_COALESCED_MMIO
    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, KVM_CAP_COALESCED_MMIO);
    if (ret > 0)
        s->coalesced_mmio = ret;
#endif

    ret = kvm_arch_init(s, smp_cpus);
    if (ret < 0)
        goto err;

    kvm_state = s;

    return 0;

err:
    if (s) {
        if (s->vmfd != -1)
            close(s->vmfd);
        if (s->fd != -1)
            close(s->fd);
    }
    qemu_free(s);

    return ret;
}
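
/* Handle a KVM_EXIT_IO exit: perform each of the 'count' port I/O accesses
 * it describes, moving data through the guest buffer at 'data'. */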
static int kvm_handle_io(CPUState *env, uint16_t port, void *data,
                         int direction, int size, uint32_t count)
{
    int i;
    uint8_t *ptr = data;

    for (i = 0; i < count; i++) {
        if (direction == KVM_EXIT_IO_IN) {
            switch (size) {
            case 1:
                stb_p(ptr, cpu_inb(env, port));
                break;
            case 2:
                stw_p(ptr, cpu_inw(env, port));
                break;
            case 4:
                stl_p(ptr, cpu_inl(env, port));
                break;
            }
        } else {
            switch (size) {
            case 1:
                cpu_outb(env, port, ldub_p(ptr));
                break;
            case 2:
                cpu_outw(env, port, lduw_p(ptr));
                break;
            case 4:
                cpu_outl(env, port, ldl_p(ptr));
                break;
            }
        }

        ptr += size;
    }

    return 1;
}
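
/* Drain the coalesced-MMIO ring shared with the kernel, replaying each
 * queued write into guest physical memory. */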
static void kvm_run_coalesced_mmio(CPUState *env, struct kvm_run *run)
{
#ifdef KVM_CAP_COALESCED_MMIO
    KVMState *s = kvm_state;
    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_ring *ring;

        ring = (void *)run + (s->coalesced_mmio * TARGET_PAGE_SIZE);
        while (ring->first != ring->last) {
            struct kvm_coalesced_mmio *ent;

            ent = &ring->coalesced_mmio[ring->first];

            cpu_physical_memory_write(ent->phys_addr, ent->data, ent->len);
            /* FIXME smp_wmb() */
            ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
        }
    }
#endif
}
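
/* Main vcpu loop: enter the guest with KVM_RUN and dispatch each exit
 * reason, looping until an exit requires returning to the main loop. */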
int kvm_cpu_exec(CPUState *env)
{
    struct kvm_run *run = env->kvm_run;
    int ret;

    dprintf("kvm_cpu_exec()\n");

    do {
        kvm_arch_pre_run(env, run);

        if (env->exit_request) {
            dprintf("interrupt exit requested\n");
            ret = 0;
            break;
        }

        ret = kvm_vcpu_ioctl(env, KVM_RUN, 0);
        kvm_arch_post_run(env, run);

        if (ret == -EINTR || ret == -EAGAIN) {
            dprintf("io window exit\n");
            ret = 0;
            break;
        }

        if (ret < 0) {
            dprintf("kvm run failed %s\n", strerror(-ret));
            abort();
        }

        kvm_run_coalesced_mmio(env, run);

        ret = 0; /* exit loop */
        switch (run->exit_reason) {
        case KVM_EXIT_IO:
            dprintf("handle_io\n");
            ret = kvm_handle_io(env, run->io.port,
                                (uint8_t *)run + run->io.data_offset,
                                run->io.direction,
                                run->io.size,
                                run->io.count);
            break;
        case KVM_EXIT_MMIO:
            dprintf("handle_mmio\n");
            cpu_physical_memory_rw(run->mmio.phys_addr,
                                   run->mmio.data,
                                   run->mmio.len,
                                   run->mmio.is_write);
            ret = 1;
            break;
        case KVM_EXIT_IRQ_WINDOW_OPEN:
            dprintf("irq_window_open\n");
            break;
        case KVM_EXIT_SHUTDOWN:
            dprintf("shutdown\n");
            qemu_system_reset_request();
            ret = 1;
            break;
        case KVM_EXIT_UNKNOWN:
            dprintf("kvm_exit_unknown\n");
            break;
        case KVM_EXIT_FAIL_ENTRY:
            dprintf("kvm_exit_fail_entry\n");
            break;
        case KVM_EXIT_EXCEPTION:
            dprintf("kvm_exit_exception\n");
            break;
        case KVM_EXIT_DEBUG:
            dprintf("kvm_exit_debug\n");
#ifdef KVM_CAP_SET_GUEST_DEBUG
            if (kvm_arch_debug(&run->debug.arch)) {
                gdb_set_stop_cpu(env);
                vm_stop(EXCP_DEBUG);
                env->exception_index = EXCP_DEBUG;
                return 0;
            }
            /* re-enter, this exception was guest-internal */
            ret = 1;
#endif /* KVM_CAP_SET_GUEST_DEBUG */
            break;
        default:
            dprintf("kvm_arch_handle_exit\n");
            ret = kvm_arch_handle_exit(env, run);
            break;
        }
    } while (ret > 0);

    if (env->exit_request) {
        env->exit_request = 0;
        env->exception_index = EXCP_INTERRUPT;
    }

    return ret;
}
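
/* Tell KVM about a change in the guest physical memory map. Depending on
 * the new flags this registers a fresh slot, drops an existing one, or
 * splits a slot into prefix/new/suffix pieces when a subrange is remapped. */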
void kvm_set_phys_mem(target_phys_addr_t start_addr,
                      ram_addr_t size,
                      ram_addr_t phys_offset)
{
    KVMState *s = kvm_state;
    ram_addr_t flags = phys_offset & ~TARGET_PAGE_MASK;
    KVMSlot *mem;

    /* KVM does not support read-only slots */
    phys_offset &= ~IO_MEM_ROM;

    mem = kvm_lookup_slot(s, start_addr);
    if (mem) {
        if ((flags == IO_MEM_UNASSIGNED) || (flags >= TLB_MMIO)) {
            mem->memory_size = 0;
            mem->start_addr = start_addr;
            mem->phys_offset = 0;
            mem->flags = 0;

            kvm_set_user_memory_region(s, mem);
        } else if (start_addr >= mem->start_addr &&
                   (start_addr + size) <= (mem->start_addr +
                                           mem->memory_size)) {
            KVMSlot slot;
            target_phys_addr_t mem_start;
            ram_addr_t mem_size, mem_offset;

            /* Not splitting */
            if ((phys_offset - (start_addr - mem->start_addr)) ==
                mem->phys_offset)
                return;

            /* unregister whole slot */
            memcpy(&slot, mem, sizeof(slot));
            mem->memory_size = 0;
            kvm_set_user_memory_region(s, mem);

            /* register prefix slot */
            mem_start = slot.start_addr;
            mem_size = start_addr - slot.start_addr;
            mem_offset = slot.phys_offset;
            if (mem_size)
                kvm_set_phys_mem(mem_start, mem_size, mem_offset);

            /* register new slot */
            kvm_set_phys_mem(start_addr, size, phys_offset);

            /* register suffix slot */
            mem_start = start_addr + size;
            mem_offset += mem_size + size;
            mem_size = slot.memory_size - mem_size - size;
            if (mem_size)
                kvm_set_phys_mem(mem_start, mem_size, mem_offset);

            return;
        } else {
            fprintf(stderr, "Registering overlapping slot\n");
            abort();
        }
    }
    /* KVM does not need to know about this memory */
    if (flags >= IO_MEM_UNASSIGNED)
        return;

    mem = kvm_alloc_slot(s);
    mem->memory_size = size;
    mem->start_addr = start_addr;
    mem->phys_offset = phys_offset;
    mem->flags = 0;

    kvm_set_user_memory_region(s, mem);
    /* FIXME deal with errors */
}
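
/* Thin wrappers around ioctl() for the /dev/kvm fd, the VM fd and a vcpu
 * fd; each converts a -1 result into a negative errno value. */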
int kvm_ioctl(KVMState *s, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    ret = ioctl(s->fd, type, arg);
    if (ret == -1)
        ret = -errno;

    return ret;
}

int kvm_vm_ioctl(KVMState *s, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    ret = ioctl(s->vmfd, type, arg);
    if (ret == -1)
        ret = -errno;

    return ret;
}

int kvm_vcpu_ioctl(CPUState *env, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    ret = ioctl(env->kvm_fd, type, arg);
    if (ret == -1)
        ret = -errno;

    return ret;
}
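
/* Nonzero if the kernel advertises KVM_CAP_SYNC_MMU, i.e. it keeps its
 * shadow of guest memory in sync with the host address space via MMU
 * notifiers. */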
int kvm_has_sync_mmu(void)
{
#ifdef KVM_CAP_SYNC_MMU
    KVMState *s = kvm_state;

    if (kvm_ioctl(s, KVM_CHECK_EXTENSION, KVM_CAP_SYNC_MMU) > 0)
        return 1;
#endif

    return 0;
}

#ifdef KVM_CAP_SET_GUEST_DEBUG
struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *env,
                                                 target_ulong pc)
{
    struct kvm_sw_breakpoint *bp;

    TAILQ_FOREACH(bp, &env->kvm_state->kvm_sw_breakpoints, entry) {
        if (bp->pc == pc)
            return bp;
    }
    return NULL;
}

int kvm_sw_breakpoints_active(CPUState *env)
{
    return !TAILQ_EMPTY(&env->kvm_state->kvm_sw_breakpoints);
}
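
/* Rebuild the vcpu's guest-debug control state (single-stepping plus any
 * architecture breakpoints, optionally a trap to reinject) and hand it to
 * the kernel. */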
int kvm_update_guest_debug(CPUState *env, unsigned long reinject_trap)
{
    struct kvm_guest_debug dbg;

    dbg.control = 0;
    if (env->singlestep_enabled)
        dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;

    kvm_arch_update_guest_debug(env, &dbg);
    dbg.control |= reinject_trap;

    return kvm_vcpu_ioctl(env, KVM_SET_GUEST_DEBUG, &dbg);
}
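
/* Insert a software or hardware breakpoint on behalf of the gdbstub and
 * refresh every vcpu's debug state. Software breakpoints are reference
 * counted, so the same address may be requested more than once. */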
int kvm_insert_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    struct kvm_sw_breakpoint *bp;
    CPUState *env;
    int err;

    if (type == GDB_BREAKPOINT_SW) {
        bp = kvm_find_sw_breakpoint(current_env, addr);
        if (bp) {
            bp->use_count++;
            return 0;
        }

        bp = qemu_malloc(sizeof(struct kvm_sw_breakpoint));
        if (!bp)
            return -ENOMEM;

        bp->pc = addr;
        bp->use_count = 1;
        err = kvm_arch_insert_sw_breakpoint(current_env, bp);
        if (err) {
            qemu_free(bp);
            return err;
        }

        TAILQ_INSERT_HEAD(&current_env->kvm_state->kvm_sw_breakpoints,
                          bp, entry);
    } else {
        err = kvm_arch_insert_hw_breakpoint(addr, len, type);
        if (err)
            return err;
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        err = kvm_update_guest_debug(env, 0);
        if (err)
            return err;
    }
    return 0;
}

int kvm_remove_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    struct kvm_sw_breakpoint *bp;
    CPUState *env;
    int err;

    if (type == GDB_BREAKPOINT_SW) {
        bp = kvm_find_sw_breakpoint(current_env, addr);
        if (!bp)
            return -ENOENT;

        if (bp->use_count > 1) {
            bp->use_count--;
            return 0;
        }

        err = kvm_arch_remove_sw_breakpoint(current_env, bp);
        if (err)
            return err;

        TAILQ_REMOVE(&current_env->kvm_state->kvm_sw_breakpoints, bp, entry);
        qemu_free(bp);
    } else {
        err = kvm_arch_remove_hw_breakpoint(addr, len, type);
        if (err)
            return err;
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        err = kvm_update_guest_debug(env, 0);
        if (err)
            return err;
    }
    return 0;
}

void kvm_remove_all_breakpoints(CPUState *current_env)
{
    struct kvm_sw_breakpoint *bp, *next;
    KVMState *s = current_env->kvm_state;
    CPUState *env;

    TAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) {
        if (kvm_arch_remove_sw_breakpoint(current_env, bp) != 0) {
            /* Try harder to find a CPU that currently sees the breakpoint. */
            for (env = first_cpu; env != NULL; env = env->next_cpu) {
                if (kvm_arch_remove_sw_breakpoint(env, bp) == 0)
                    break;
            }
        }
    }
    kvm_arch_remove_all_hw_breakpoints();

    for (env = first_cpu; env != NULL; env = env->next_cpu)
        kvm_update_guest_debug(env, 0);
}

#else /* !KVM_CAP_SET_GUEST_DEBUG */

int kvm_update_guest_debug(CPUState *env, unsigned long reinject_trap)
{
    return -EINVAL;
}

int kvm_insert_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    return -EINVAL;
}

int kvm_remove_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    return -EINVAL;
}

void kvm_remove_all_breakpoints(CPUState *current_env)
{
}
#endif /* !KVM_CAP_SET_GUEST_DEBUG */