target-ppc/kvm.c @ 26b14dc4

/*
 * PowerPC implementation of KVM hooks
 *
 * Copyright IBM Corp. 2007
 *
 * Authors:
 *  Jerone Young <jyoung5@us.ibm.com>
 *  Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 *  Hollis Blanchard <hollisb@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "qemu-timer.h"
#include "sysemu.h"
#include "kvm.h"
#include "kvm_ppc.h"
#include "cpu.h"
#include "device_tree.h"

//#define DEBUG_KVM

#ifdef DEBUG_KVM
#define dprintf(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define dprintf(fmt, ...) \
    do { } while (0)
#endif

/* XXX For some odd reason we sometimes hang inside KVM forever. I'd guess it's
 *     a race condition where we actually have a level triggered interrupt, but
 *     the infrastructure can't expose that yet, so the guest ACKs it, goes to
 *     sleep and never gets notified that there's still an interrupt pending.
 *
 *     As a quick workaround, let's just wake up every 500 ms. That way we can
 *     ensure that we're always reinjecting interrupts in time.
 */
static QEMUTimer *idle_timer;

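/* Timer callback for the workaround above: it does no work of its own and
 * simply re-arms the timer to fire again half a second later. */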
static void do_nothing(void *opaque)
{
    qemu_mod_timer(idle_timer, qemu_get_clock(vm_clock) +
                   (get_ticks_per_sec() / 2));
}

int kvm_arch_init(KVMState *s, int smp_cpus)
{
    return 0;
}

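/* Per-vcpu init: hand the guest's PVR to the kernel via KVM_SET_SREGS so KVM
 * knows which CPU model is being virtualized. Note that only the pvr field of
 * sregs is filled in here. */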
int kvm_arch_init_vcpu(CPUState *cenv)
{
    int ret = 0;
    struct kvm_sregs sregs;

    sregs.pvr = cenv->spr[SPR_PVR];
    ret = kvm_vcpu_ioctl(cenv, KVM_SET_SREGS, &sregs);

    return ret;
}

void kvm_arch_reset_vcpu(CPUState *env)
{
}

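/* Copy QEMU's CPU state (GPRs, CTR, LR, XER, MSR, PC, SRR0/SRR1 and the SPRGs)
 * into the kernel so KVM resumes the guest with up-to-date registers. */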
int kvm_arch_put_registers(CPUState *env, int level)
{
    struct kvm_regs regs;
    int ret;
    int i;

    ret = kvm_vcpu_ioctl(env, KVM_GET_REGS, &regs);
    if (ret < 0)
        return ret;

    regs.ctr = env->ctr;
    regs.lr  = env->lr;
    regs.xer = env->xer;
    regs.msr = env->msr;
    regs.pc = env->nip;

    regs.srr0 = env->spr[SPR_SRR0];
    regs.srr1 = env->spr[SPR_SRR1];

    regs.sprg0 = env->spr[SPR_SPRG0];
    regs.sprg1 = env->spr[SPR_SPRG1];
    regs.sprg2 = env->spr[SPR_SPRG2];
    regs.sprg3 = env->spr[SPR_SPRG3];
    regs.sprg4 = env->spr[SPR_SPRG4];
    regs.sprg5 = env->spr[SPR_SPRG5];
    regs.sprg6 = env->spr[SPR_SPRG6];
    regs.sprg7 = env->spr[SPR_SPRG7];

    for (i = 0; i < 32; i++)
        regs.gpr[i] = env->gpr[i];

    ret = kvm_vcpu_ioctl(env, KVM_SET_REGS, &regs);
    if (ret < 0)
        return ret;

    return ret;
}

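/* Read the register state back from KVM into QEMU's CPUState. When the kernel
 * advertises KVM_CAP_PPC_SEGSTATE, the MMU state is synced as well: SDR1, the
 * SLB on 64-bit targets, the segment registers and the BATs. */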
int kvm_arch_get_registers(CPUState *env)
{
    struct kvm_regs regs;
    struct kvm_sregs sregs;
    int i, ret;

    ret = kvm_vcpu_ioctl(env, KVM_GET_REGS, &regs);
    if (ret < 0)
        return ret;

    ret = kvm_vcpu_ioctl(env, KVM_GET_SREGS, &sregs);
    if (ret < 0)
        return ret;

    env->ctr = regs.ctr;
    env->lr = regs.lr;
    env->xer = regs.xer;
    env->msr = regs.msr;
    env->nip = regs.pc;

    env->spr[SPR_SRR0] = regs.srr0;
    env->spr[SPR_SRR1] = regs.srr1;

    env->spr[SPR_SPRG0] = regs.sprg0;
    env->spr[SPR_SPRG1] = regs.sprg1;
    env->spr[SPR_SPRG2] = regs.sprg2;
    env->spr[SPR_SPRG3] = regs.sprg3;
    env->spr[SPR_SPRG4] = regs.sprg4;
    env->spr[SPR_SPRG5] = regs.sprg5;
    env->spr[SPR_SPRG6] = regs.sprg6;
    env->spr[SPR_SPRG7] = regs.sprg7;

    for (i = 0; i < 32; i++)
        env->gpr[i] = regs.gpr[i];

#ifdef KVM_CAP_PPC_SEGSTATE
    if (kvm_check_extension(env->kvm_state, KVM_CAP_PPC_SEGSTATE)) {
        env->sdr1 = sregs.u.s.sdr1;

        /* Sync SLB */
#ifdef TARGET_PPC64
        for (i = 0; i < 64; i++) {
            ppc_store_slb(env, sregs.u.s.ppc64.slb[i].slbe,
                               sregs.u.s.ppc64.slb[i].slbv);
        }
#endif

        /* Sync SRs */
        for (i = 0; i < 16; i++) {
            env->sr[i] = sregs.u.s.ppc32.sr[i];
        }

        /* Sync BATs */
        for (i = 0; i < 8; i++) {
            env->DBAT[0][i] = sregs.u.s.ppc32.dbat[i] & 0xffffffff;
            env->DBAT[1][i] = sregs.u.s.ppc32.dbat[i] >> 32;
            env->IBAT[0][i] = sregs.u.s.ppc32.ibat[i] & 0xffffffff;
            env->IBAT[1][i] = sregs.u.s.ppc32.ibat[i] >> 32;
        }
    }
#endif

    return 0;
}

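/* Pick the external interrupt input pin matching the CPU family this binary is
 * built for; kvm_arch_pre_run() below checks this pin before injecting an
 * interrupt. */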
#if defined(TARGET_PPCEMB)
#define PPC_INPUT_INT PPC40x_INPUT_INT
#elif defined(TARGET_PPC64)
#define PPC_INPUT_INT PPC970_INPUT_INT
#else
#define PPC_INPUT_INT PPC6xx_INPUT_INT
#endif

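/* Runs immediately before KVM_RUN: lazily create and arm the wakeup timer,
 * then, if the guest is ready for injection and the external interrupt pin is
 * raised, deliver the interrupt with the KVM_INTERRUPT ioctl. */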
int kvm_arch_pre_run(CPUState *env, struct kvm_run *run)
{
    int r;
    unsigned irq;

    if (!idle_timer) {
        idle_timer = qemu_new_timer(vm_clock, do_nothing, NULL);
        qemu_mod_timer(idle_timer, qemu_get_clock(vm_clock) +
                       (get_ticks_per_sec() / 2));
    }

    /* PowerPC QEMU tracks the various core input pins (interrupt, critical
     * interrupt, reset, etc) in PPC-specific env->irq_input_state. */
    if (run->ready_for_interrupt_injection &&
        (env->interrupt_request & CPU_INTERRUPT_HARD) &&
        (env->irq_input_state & (1<<PPC_INPUT_INT)))
    {
        /* For now KVM disregards the 'irq' argument. However, in the
         * future KVM could cache it in-kernel to avoid a heavyweight exit
         * when reading the UIC.
         */
        irq = -1U;

        dprintf("injected interrupt %d\n", irq);
        r = kvm_vcpu_ioctl(env, KVM_INTERRUPT, &irq);
        if (r < 0)
            printf("cpu %d fail inject %x\n", env->cpu_index, irq);
    }

    /* We don't know if there are more interrupts pending after this. However,
     * the guest will return to userspace in the course of handling this one
     * anyway, so we will get a chance to deliver the rest. */
    return 0;
}

int kvm_arch_post_run(CPUState *env, struct kvm_run *run)
{
    return 0;
}

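/* The guest halted: if no hard interrupt is pending and external interrupts
 * are enabled (the msr_ee check), mark the vcpu halted and flag EXCP_HLT so it
 * is not run again until an interrupt shows up. */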
static int kvmppc_handle_halt(CPUState *env)
{
    if (!(env->interrupt_request & CPU_INTERRUPT_HARD) && (msr_ee)) {
        env->halted = 1;
        env->exception_index = EXCP_HLT;
    }

    return 1;
}

/* map dcr access to existing qemu dcr emulation */
static int kvmppc_handle_dcr_read(CPUState *env, uint32_t dcrn, uint32_t *data)
{
    if (ppc_dcr_read(env->dcr_env, dcrn, data) < 0)
        fprintf(stderr, "Read from unhandled DCR (0x%x)\n", dcrn);

    return 1;
}

static int kvmppc_handle_dcr_write(CPUState *env, uint32_t dcrn, uint32_t data)
{
    if (ppc_dcr_write(env->dcr_env, dcrn, data) < 0)
        fprintf(stderr, "Write to unhandled DCR (0x%x)\n", dcrn);

    return 1;
}

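/* Dispatch PPC-specific KVM exit reasons: DCR reads and writes are forwarded
 * to QEMU's DCR emulation above, and KVM_EXIT_HLT is handled by
 * kvmppc_handle_halt(). */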
int kvm_arch_handle_exit(CPUState *env, struct kvm_run *run)
{
    int ret = 0;

    switch (run->exit_reason) {
    case KVM_EXIT_DCR:
        if (run->dcr.is_write) {
            dprintf("handle dcr write\n");
            ret = kvmppc_handle_dcr_write(env, run->dcr.dcrn, run->dcr.data);
        } else {
            dprintf("handle dcr read\n");
            ret = kvmppc_handle_dcr_read(env, run->dcr.dcrn, &run->dcr.data);
        }
        break;
    case KVM_EXIT_HLT:
        dprintf("handle halt\n");
        ret = kvmppc_handle_halt(env);
        break;
    }

    return ret;
}

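/* Scan /proc/cpuinfo for a line beginning with 'field' and copy it (at most
 * len bytes, including the field name itself) into 'value'. Returns 0 on
 * success and -1 if the file cannot be opened or the field is not found. */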
static int read_cpuinfo(const char *field, char *value, int len)
{
    FILE *f;
    int ret = -1;
    int field_len = strlen(field);
    char line[512];

    f = fopen("/proc/cpuinfo", "r");
    if (!f) {
        return -1;
    }

    do {
        if (!fgets(line, sizeof(line), f)) {
            break;
        }
        if (!strncmp(line, field, field_len)) {
            strncpy(value, line, len);
            ret = 0;
            break;
        }
    } while (*line);

    fclose(f);

    return ret;
}

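/* Determine the timebase frequency from the host's /proc/cpuinfo "timebase"
 * line, e.g. "timebase : 512000000" (the value here is purely illustrative).
 * If the line is missing or has no ':', fall back to get_ticks_per_sec(). */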
uint32_t kvmppc_get_tbfreq(void)
{
    char line[512];
    char *ns;
    uint32_t retval = get_ticks_per_sec();

    if (read_cpuinfo("timebase", line, sizeof(line))) {
        return retval;
    }

    if (!(ns = strchr(line, ':'))) {
        return retval;
    }

    ns++;

    retval = atoi(ns);
    return retval;
}