/*
 * QEMU KVM support
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#ifndef QEMU_KVM_H
#define QEMU_KVM_H

#include <errno.h>
#include "config-host.h"
#include "qemu-queue.h"

#ifdef CONFIG_KVM
#include <linux/kvm.h>
#endif

extern int kvm_allowed;
extern bool kvm_kernel_irqchip;
extern bool kvm_async_interrupts_allowed;
extern bool kvm_irqfds_allowed;
extern bool kvm_msi_via_irqfd_allowed;
extern bool kvm_gsi_routing_allowed;

#if defined CONFIG_KVM || !defined NEED_CPU_H
#define kvm_enabled()           (kvm_allowed)
/**
 * kvm_irqchip_in_kernel:
 *
 * Returns: true if the user asked us to create an in-kernel
 * irqchip via the "kernel_irqchip=on" machine option.
 * What this actually means is architecture and machine model
 * specific: on PC, for instance, it means that the LAPIC,
 * IOAPIC and PIT are all in kernel. This function should never
 * be used from generic target-independent code: use one of the
 * following functions or some other specific check instead.
 */
#define kvm_irqchip_in_kernel() (kvm_kernel_irqchip)

/**
 * kvm_async_interrupts_enabled:
 *
 * Returns: true if we can deliver interrupts to KVM
 * asynchronously (ie by ioctl from any thread at any time)
 * rather than having to do interrupt delivery synchronously
 * (where the vcpu must be stopped at a suitable point first).
 */
#define kvm_async_interrupts_enabled() (kvm_async_interrupts_allowed)

/**
 * kvm_irqfds_enabled:
 *
 * Returns: true if we can use irqfds to inject interrupts into
 * a KVM CPU (ie the kernel supports irqfds and we are running
 * with a configuration where it is meaningful to use them).
 */
#define kvm_irqfds_enabled() (kvm_irqfds_allowed)

/**
 * kvm_msi_via_irqfd_enabled:
 *
 * Returns: true if we can route a PCI MSI (Message Signaled Interrupt)
 * to a KVM CPU via an irqfd. This requires that the kernel supports
 * this and that we're running in a configuration that permits it.
 */
#define kvm_msi_via_irqfd_enabled() (kvm_msi_via_irqfd_allowed)

/**
 * kvm_gsi_routing_enabled:
 *
 * Returns: true if GSI routing is enabled (ie the kernel supports
 * it and we're running in a configuration that permits it).
 */
#define kvm_gsi_routing_enabled() (kvm_gsi_routing_allowed)

#else
#define kvm_enabled()           (0)
#define kvm_irqchip_in_kernel() (false)
#define kvm_async_interrupts_enabled() (false)
#define kvm_irqfds_enabled() (false)
#define kvm_msi_via_irqfd_enabled() (false)
#define kvm_gsi_routing_enabled() (false)
#endif
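
/*
 * Example (illustrative only, not part of the original API): generic device
 * code is expected to check these predicates rather than
 * kvm_irqchip_in_kernel() when choosing an interrupt delivery path, e.g.:
 *
 *     if (kvm_enabled() && kvm_msi_via_irqfd_enabled()) {
 *         // route the MSI through an irqfd (see kvm_irqchip_add_msi_route()
 *         // and kvm_irqchip_add_irqfd_notifier() further down)
 *     } else {
 *         // fall back to injecting the interrupt from userspace
 *     }
 */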

struct kvm_run;
struct kvm_lapic_state;

typedef struct KVMCapabilityInfo {
    const char *name;
    int value;
} KVMCapabilityInfo;

#define KVM_CAP_INFO(CAP) { "KVM_CAP_" stringify(CAP), KVM_CAP_##CAP }
#define KVM_CAP_LAST_INFO { NULL, 0 }
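
/*
 * Sketch (assumed usage, capability name chosen for illustration): an
 * architecture backend populates the kvm_arch_required_capabilities[] table
 * declared below with these macros, terminated by KVM_CAP_LAST_INFO:
 *
 *     const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
 *         KVM_CAP_INFO(USER_MEMORY),
 *         KVM_CAP_LAST_INFO
 *     };
 */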

struct KVMState;
typedef struct KVMState KVMState;
extern KVMState *kvm_state;

/* external API */

int kvm_init(void);

int kvm_has_sync_mmu(void);
int kvm_has_vcpu_events(void);
int kvm_has_robust_singlestep(void);
int kvm_has_debugregs(void);
int kvm_has_xsave(void);
int kvm_has_xcrs(void);
int kvm_has_pit_state2(void);
int kvm_has_many_ioeventfds(void);
int kvm_has_gsi_routing(void);
int kvm_has_intx_set_mask(void);

#ifdef NEED_CPU_H
int kvm_init_vcpu(CPUArchState *env);

int kvm_cpu_exec(CPUArchState *env);

#if !defined(CONFIG_USER_ONLY)
void *kvm_vmalloc(ram_addr_t size);
void *kvm_arch_vmalloc(ram_addr_t size);
void kvm_setup_guest_memory(void *start, size_t size);

int kvm_coalesce_mmio_region(target_phys_addr_t start, ram_addr_t size);
int kvm_uncoalesce_mmio_region(target_phys_addr_t start, ram_addr_t size);
void kvm_flush_coalesced_mmio_buffer(void);
#endif

int kvm_insert_breakpoint(CPUArchState *current_env, target_ulong addr,
                          target_ulong len, int type);
int kvm_remove_breakpoint(CPUArchState *current_env, target_ulong addr,
                          target_ulong len, int type);
void kvm_remove_all_breakpoints(CPUArchState *current_env);
int kvm_update_guest_debug(CPUArchState *env, unsigned long reinject_trap);
#ifndef _WIN32
int kvm_set_signal_mask(CPUArchState *env, const sigset_t *sigset);
#endif

int kvm_on_sigbus_vcpu(CPUArchState *env, int code, void *addr);
int kvm_on_sigbus(int code, void *addr);

/* internal API */

int kvm_ioctl(KVMState *s, int type, ...);

int kvm_vm_ioctl(KVMState *s, int type, ...);

int kvm_vcpu_ioctl(CPUArchState *env, int type, ...);

/* Arch specific hooks */

extern const KVMCapabilityInfo kvm_arch_required_capabilities[];

void kvm_arch_pre_run(CPUArchState *env, struct kvm_run *run);
void kvm_arch_post_run(CPUArchState *env, struct kvm_run *run);

int kvm_arch_handle_exit(CPUArchState *env, struct kvm_run *run);

int kvm_arch_process_async_events(CPUArchState *env);

int kvm_arch_get_registers(CPUArchState *env);

/* state subset only touched by the VCPU itself during runtime */
#define KVM_PUT_RUNTIME_STATE   1
/* state subset modified during VCPU reset */
#define KVM_PUT_RESET_STATE     2
/* full state set, modified during initialization or on vmload */
#define KVM_PUT_FULL_STATE      3

int kvm_arch_put_registers(CPUArchState *env, int level);
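
/*
 * Minimal sketch of the expected shape of an implementation (assumed, not
 * dictated by this header): the "level" argument selects how much state to
 * push back into the kernel, with each level including the previous ones:
 *
 *     int kvm_arch_put_registers(CPUArchState *env, int level)
 *     {
 *         // always sync runtime-volatile registers
 *         if (level >= KVM_PUT_RESET_STATE) {
 *             // also sync state modified by VCPU reset
 *         }
 *         if (level >= KVM_PUT_FULL_STATE) {
 *             // push the full state, e.g. after init or vmload
 *         }
 *         return 0;
 *     }
 */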

int kvm_arch_init(KVMState *s);

int kvm_arch_init_vcpu(CPUArchState *env);

void kvm_arch_reset_vcpu(CPUArchState *env);

int kvm_arch_on_sigbus_vcpu(CPUArchState *env, int code, void *addr);
int kvm_arch_on_sigbus(int code, void *addr);

void kvm_arch_init_irq_routing(KVMState *s);

int kvm_set_irq(KVMState *s, int irq, int level);
int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg);

void kvm_irqchip_add_irq_route(KVMState *s, int gsi, int irqchip, int pin);

void kvm_put_apic_state(DeviceState *d, struct kvm_lapic_state *kapic);
void kvm_get_apic_state(DeviceState *d, struct kvm_lapic_state *kapic);

struct kvm_guest_debug;
struct kvm_debug_exit_arch;

struct kvm_sw_breakpoint {
    target_ulong pc;
    target_ulong saved_insn;
    int use_count;
    QTAILQ_ENTRY(kvm_sw_breakpoint) entry;
};

QTAILQ_HEAD(kvm_sw_breakpoint_head, kvm_sw_breakpoint);

struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUArchState *env,
                                                 target_ulong pc);

int kvm_sw_breakpoints_active(CPUArchState *env);

int kvm_arch_insert_sw_breakpoint(CPUArchState *current_env,
                                  struct kvm_sw_breakpoint *bp);
int kvm_arch_remove_sw_breakpoint(CPUArchState *current_env,
                                  struct kvm_sw_breakpoint *bp);
int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type);
int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type);
void kvm_arch_remove_all_hw_breakpoints(void);
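
/*
 * Typical contract (assumed, not specified here): kvm_arch_insert_sw_breakpoint()
 * saves the original instruction at bp->pc into bp->saved_insn and patches in
 * the architecture's breakpoint instruction; kvm_arch_remove_sw_breakpoint()
 * restores bp->saved_insn. On x86, for instance, this amounts to a one-byte
 * swap with int3 (0xcc) through the guest-memory debug accessors.
 */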

void kvm_arch_update_guest_debug(CPUArchState *env, struct kvm_guest_debug *dbg);

bool kvm_arch_stop_on_emulation_error(CPUArchState *env);

int kvm_check_extension(KVMState *s, unsigned int extension);
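
/*
 * kvm_check_extension() wraps the KVM_CHECK_EXTENSION ioctl; a hypothetical
 * caller probing for irqfd support might do:
 *
 *     if (kvm_check_extension(kvm_state, KVM_CAP_IRQFD)) {
 *         // kernel supports irqfds
 *     }
 */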

uint32_t kvm_arch_get_supported_cpuid(KVMState *env, uint32_t function,
                                      uint32_t index, int reg);
void kvm_cpu_synchronize_state(CPUArchState *env);
void kvm_cpu_synchronize_post_reset(CPUArchState *env);
void kvm_cpu_synchronize_post_init(CPUArchState *env);

/* generic hooks - to be moved/refactored once there are more users */

static inline void cpu_synchronize_state(CPUArchState *env)
{
    if (kvm_enabled()) {
        kvm_cpu_synchronize_state(env);
    }
}

static inline void cpu_synchronize_post_reset(CPUArchState *env)
{
    if (kvm_enabled()) {
        kvm_cpu_synchronize_post_reset(env);
    }
}

static inline void cpu_synchronize_post_init(CPUArchState *env)
{
    if (kvm_enabled()) {
        kvm_cpu_synchronize_post_init(env);
    }
}

#if !defined(CONFIG_USER_ONLY)
int kvm_physical_memory_addr_from_host(KVMState *s, void *ram_addr,
                                       target_phys_addr_t *phys_addr);
#endif

#endif

int kvm_set_ioeventfd_mmio(int fd, uint32_t adr, uint32_t val, bool assign,
                           uint32_t size);

int kvm_set_ioeventfd_pio_word(int fd, uint16_t adr, uint16_t val, bool assign);

int kvm_irqchip_add_msi_route(KVMState *s, MSIMessage msg);
int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg);
void kvm_irqchip_release_virq(KVMState *s, int virq);

int kvm_irqchip_add_irqfd_notifier(KVMState *s, EventNotifier *n, int virq);
int kvm_irqchip_remove_irqfd_notifier(KVMState *s, EventNotifier *n, int virq);
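
/*
 * Illustrative wiring (assumed usage; "msg" and "notifier" are hypothetical
 * locals): a backend that wants an MSI delivered straight from an
 * EventNotifier would typically do:
 *
 *     if (kvm_irqfds_enabled() && kvm_msi_via_irqfd_enabled()) {
 *         int virq = kvm_irqchip_add_msi_route(kvm_state, msg);
 *         if (virq >= 0 &&
 *             kvm_irqchip_add_irqfd_notifier(kvm_state, &notifier, virq) == 0) {
 *             // interrupts now bypass userspace entirely
 *         }
 *     }
 *
 * and undo it with kvm_irqchip_remove_irqfd_notifier() followed by
 * kvm_irqchip_release_virq().
 */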
#endif