/*
 * QEMU S390x KVM implementation
 *
 * Copyright (c) 2009 Alexander Graf <agraf@suse.de>
 * Copyright IBM Corp. 2012
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * Contributions after 2012-10-29 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 *
 * You should have received a copy of the GNU (Lesser) General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include <linux/kvm.h>
#include <asm/ptrace.h>

#include "qemu-common.h"
#include "qemu/timer.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
#include "cpu.h"
#include "sysemu/device_tree.h"
#include "qapi/qmp/qjson.h"
#include "monitor/monitor.h"

/* #define DEBUG_KVM */

#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#define IPA0_DIAG                       0x8300
#define IPA0_SIGP                       0xae00
#define IPA0_B2                         0xb200
#define IPA0_B9                         0xb900
#define IPA0_EB                         0xeb00
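
/*
 * The IPA0_* values select the opcode family from the high byte of the
 * intercepted instruction (the SIE "ipa" field).  The PRIV_* values below
 * are the second opcode byte within the 0xb2/0xb9/0xeb families, DIAG_*
 * are diagnose function codes and ICPT_* are SIE interception codes.
 */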
#define PRIV_B2_SCLP_CALL               0x20
#define PRIV_B2_CSCH                    0x30
#define PRIV_B2_HSCH                    0x31
#define PRIV_B2_MSCH                    0x32
#define PRIV_B2_SSCH                    0x33
#define PRIV_B2_STSCH                   0x34
#define PRIV_B2_TSCH                    0x35
#define PRIV_B2_TPI                     0x36
#define PRIV_B2_SAL                     0x37
#define PRIV_B2_RSCH                    0x38
#define PRIV_B2_STCRW                   0x39
#define PRIV_B2_STCPS                   0x3a
#define PRIV_B2_RCHP                    0x3b
#define PRIV_B2_SCHM                    0x3c
#define PRIV_B2_CHSC                    0x5f
#define PRIV_B2_SIGA                    0x74
#define PRIV_B2_XSCH                    0x76

#define PRIV_EB_SQBS                    0x8a

#define PRIV_B9_EQBS                    0x9c

#define DIAG_IPL                        0x308
#define DIAG_KVM_HYPERCALL              0x500
#define DIAG_KVM_BREAKPOINT             0x501

#define ICPT_INSTRUCTION                0x04
#define ICPT_WAITPSW                    0x1c
#define ICPT_SOFT_INTERCEPT             0x24
#define ICPT_CPU_STOP                   0x28
#define ICPT_IO                         0x40

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_LAST_INFO
};

static int cap_sync_regs;
static int cap_async_pf;

static void *legacy_s390_alloc(size_t size);

int kvm_arch_init(KVMState *s)
{
    cap_sync_regs = kvm_check_extension(s, KVM_CAP_SYNC_REGS);
    cap_async_pf = kvm_check_extension(s, KVM_CAP_ASYNC_PF);
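    /*
     * Older kernels without the GMAP/COW extensions need the legacy
     * allocation scheme described at legacy_s390_alloc().
     */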
    if (!kvm_check_extension(s, KVM_CAP_S390_GMAP)
        || !kvm_check_extension(s, KVM_CAP_S390_COW)) {
        phys_mem_set_alloc(legacy_s390_alloc);
    }
    return 0;
}

unsigned long kvm_arch_vcpu_id(CPUState *cpu)
{
    return cpu->cpu_index;
}

int kvm_arch_init_vcpu(CPUState *cpu)
{
    /* nothing to do yet */
    return 0;
}

void kvm_arch_reset_vcpu(CPUState *cpu)
{
    /* The initial reset call is needed here to reset in-kernel
     * vcpu data that we can't access directly from QEMU
     * (i.e. with older kernels which don't support sync_regs/ONE_REG).
     * Before this ioctl cpu_synchronize_state() is called in common kvm
     * code (kvm-all) */
    if (kvm_vcpu_ioctl(cpu, KVM_S390_INITIAL_RESET, NULL)) {
        perror("Can't reset vcpu\n");
    }
}

int kvm_arch_put_registers(CPUState *cs, int level)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    struct kvm_one_reg reg;
    struct kvm_sregs sregs;
    struct kvm_regs regs;
    int ret;
    int i;

    /* always save the PSW and the GPRS */
    cs->kvm_run->psw_addr = env->psw.addr;
    cs->kvm_run->psw_mask = env->psw.mask;
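
    /*
     * With KVM_CAP_SYNC_REGS the general purpose registers are exchanged
     * through the shared kvm_run area; otherwise fall back to the
     * KVM_SET_REGS ioctl.
     */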
    if (cap_sync_regs && cs->kvm_run->kvm_valid_regs & KVM_SYNC_GPRS) {
        for (i = 0; i < 16; i++) {
            cs->kvm_run->s.regs.gprs[i] = env->regs[i];
            cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_GPRS;
        }
    } else {
        for (i = 0; i < 16; i++) {
            regs.gprs[i] = env->regs[i];
        }
        ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);
        if (ret < 0) {
            return ret;
        }
    }

    /* Do we need to save more than that? */
    if (level == KVM_PUT_RUNTIME_STATE) {
        return 0;
    }

    reg.id = KVM_REG_S390_CPU_TIMER;
    reg.addr = (__u64)&(env->cputm);
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret < 0) {
        return ret;
    }

    reg.id = KVM_REG_S390_CLOCK_COMP;
    reg.addr = (__u64)&(env->ckc);
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret < 0) {
        return ret;
    }

    reg.id = KVM_REG_S390_TODPR;
    reg.addr = (__u64)&(env->todpr);
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret < 0) {
        return ret;
    }

    if (cap_async_pf) {
        reg.id = KVM_REG_S390_PFTOKEN;
        reg.addr = (__u64)&(env->pfault_token);
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret < 0) {
            return ret;
        }

        reg.id = KVM_REG_S390_PFCOMPARE;
        reg.addr = (__u64)&(env->pfault_compare);
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret < 0) {
            return ret;
        }

        reg.id = KVM_REG_S390_PFSELECT;
        reg.addr = (__u64)&(env->pfault_select);
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret < 0) {
            return ret;
        }
    }

    if (cap_sync_regs &&
        cs->kvm_run->kvm_valid_regs & KVM_SYNC_ACRS &&
        cs->kvm_run->kvm_valid_regs & KVM_SYNC_CRS) {
        for (i = 0; i < 16; i++) {
            cs->kvm_run->s.regs.acrs[i] = env->aregs[i];
            cs->kvm_run->s.regs.crs[i] = env->cregs[i];
        }
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_ACRS;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_CRS;
    } else {
        for (i = 0; i < 16; i++) {
            sregs.acrs[i] = env->aregs[i];
            sregs.crs[i] = env->cregs[i];
        }
        ret = kvm_vcpu_ioctl(cs, KVM_SET_SREGS, &sregs);
        if (ret < 0) {
            return ret;
        }
    }

    /* Finally the prefix */
    if (cap_sync_regs && cs->kvm_run->kvm_valid_regs & KVM_SYNC_PREFIX) {
        cs->kvm_run->s.regs.prefix = env->psa;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_PREFIX;
    } else {
        /* prefix is only supported via sync regs */
    }
    return 0;
}

int kvm_arch_get_registers(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    struct kvm_one_reg reg;
    struct kvm_sregs sregs;
    struct kvm_regs regs;
    int i, r;

    /* get the PSW */
    env->psw.addr = cs->kvm_run->psw_addr;
    env->psw.mask = cs->kvm_run->psw_mask;

    /* the GPRS */
    if (cap_sync_regs && cs->kvm_run->kvm_valid_regs & KVM_SYNC_GPRS) {
        for (i = 0; i < 16; i++) {
            env->regs[i] = cs->kvm_run->s.regs.gprs[i];
        }
    } else {
        r = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
        if (r < 0) {
            return r;
        }
        for (i = 0; i < 16; i++) {
            env->regs[i] = regs.gprs[i];
        }
    }

    /* The ACRS and CRS */
    if (cap_sync_regs &&
        cs->kvm_run->kvm_valid_regs & KVM_SYNC_ACRS &&
        cs->kvm_run->kvm_valid_regs & KVM_SYNC_CRS) {
        for (i = 0; i < 16; i++) {
            env->aregs[i] = cs->kvm_run->s.regs.acrs[i];
            env->cregs[i] = cs->kvm_run->s.regs.crs[i];
        }
    } else {
        r = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs);
        if (r < 0) {
            return r;
        }
        for (i = 0; i < 16; i++) {
            env->aregs[i] = sregs.acrs[i];
            env->cregs[i] = sregs.crs[i];
        }
    }

    /* The prefix */
    if (cap_sync_regs && cs->kvm_run->kvm_valid_regs & KVM_SYNC_PREFIX) {
        env->psa = cs->kvm_run->s.regs.prefix;
    }

    /* Registers that are only accessible via the ONE_REG interface */
    reg.id = KVM_REG_S390_CPU_TIMER;
    reg.addr = (__u64)&(env->cputm);
    r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (r < 0) {
        return r;
    }

    reg.id = KVM_REG_S390_CLOCK_COMP;
    reg.addr = (__u64)&(env->ckc);
    r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (r < 0) {
        return r;
    }

    reg.id = KVM_REG_S390_TODPR;
    reg.addr = (__u64)&(env->todpr);
    r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (r < 0) {
        return r;
    }

    if (cap_async_pf) {
        reg.id = KVM_REG_S390_PFTOKEN;
        reg.addr = (__u64)&(env->pfault_token);
        r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (r < 0) {
            return r;
        }

        reg.id = KVM_REG_S390_PFCOMPARE;
        reg.addr = (__u64)&(env->pfault_compare);
        r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (r < 0) {
            return r;
        }

        reg.id = KVM_REG_S390_PFSELECT;
        reg.addr = (__u64)&(env->pfault_select);
        r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (r < 0) {
            return r;
        }
    }

    return 0;
}

/*
 * Legacy layout for s390:
 * Older S390 KVM requires the topmost vma of the RAM to be
 * smaller than a system-defined value, which is at least 256GB.
 * Larger systems have larger values. We put the guest between
 * the end of data segment (system break) and this value. We
 * use 32GB as a base to have enough room for the system break
 * to grow. We also have to use MAP parameters that avoid
 * read-only mapping of guest pages.
 */
static void *legacy_s390_alloc(size_t size)
{
    void *mem;

    mem = mmap((void *) 0x800000000ULL, size,
               PROT_EXEC|PROT_READ|PROT_WRITE,
               MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
    return mem == MAP_FAILED ? NULL : mem;
}
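
/*
 * Software breakpoints are planted by overwriting the guest instruction
 * with DIAG 0x501 (DIAG_KVM_BREAKPOINT); the original bytes are saved in
 * bp->saved_insn and written back when the breakpoint is removed.
 */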
int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    static const uint8_t diag_501[] = {0x83, 0x24, 0x05, 0x01};

    if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 0) ||
        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)diag_501, 4, 1)) {
        return -EINVAL;
    }
    return 0;
}

int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    uint8_t t[4];
    static const uint8_t diag_501[] = {0x83, 0x24, 0x05, 0x01};

    if (cpu_memory_rw_debug(cs, bp->pc, t, 4, 0)) {
        return -EINVAL;
    } else if (memcmp(t, diag_501, 4)) {
        return -EINVAL;
    } else if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 1)) {
        return -EINVAL;
    }

    return 0;
}

void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
{
}

void kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
{
}

int kvm_arch_process_async_events(CPUState *cs)
{
    return cs->halted;
}

void kvm_s390_interrupt_internal(S390CPU *cpu, int type, uint32_t parm,
                                 uint64_t parm64, int vm)
{
    CPUState *cs = CPU(cpu);
    struct kvm_s390_interrupt kvmint;
    int r;

    if (!cs->kvm_state) {
        return;
    }

    kvmint.type = type;
    kvmint.parm = parm;
    kvmint.parm64 = parm64;

    if (vm) {
        r = kvm_vm_ioctl(cs->kvm_state, KVM_S390_INTERRUPT, &kvmint);
    } else {
        r = kvm_vcpu_ioctl(cs, KVM_S390_INTERRUPT, &kvmint);
    }

    if (r < 0) {
        fprintf(stderr, "KVM failed to inject interrupt\n");
        exit(1);
    }
}

void kvm_s390_virtio_irq(S390CPU *cpu, int config_change, uint64_t token)
{
    kvm_s390_interrupt_internal(cpu, KVM_S390_INT_VIRTIO, config_change,
                                token, 1);
}

void kvm_s390_interrupt(S390CPU *cpu, int type, uint32_t code)
{
    kvm_s390_interrupt_internal(cpu, type, code, 0, 0);
}

static void enter_pgmcheck(S390CPU *cpu, uint16_t code)
{
    kvm_s390_interrupt(cpu, KVM_S390_PROGRAM_INT, code);
}

static int kvm_sclp_service_call(S390CPU *cpu, struct kvm_run *run,
                                 uint16_t ipbh0)
{
    CPUS390XState *env = &cpu->env;
    uint64_t sccb;
    uint32_t code;
    int r = 0;

    cpu_synchronize_state(CPU(cpu));
    sccb = env->regs[ipbh0 & 0xf];
    code = env->regs[(ipbh0 & 0xf0) >> 4];

    r = sclp_service_call(env, sccb, code);
    if (r < 0) {
        enter_pgmcheck(cpu, -r);
    } else {
        setcc(cpu, r);
    }

    return 0;
}
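
/*
 * Privileged 0xb2xx instructions: mostly channel-subsystem I/O
 * instructions, forwarded to the ioinst_handle_* helpers.
 */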
static int handle_b2(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
{
    CPUS390XState *env = &cpu->env;
    int rc = 0;
    uint16_t ipbh0 = (run->s390_sieic.ipb & 0xffff0000) >> 16;

    cpu_synchronize_state(CPU(cpu));

    switch (ipa1) {
    case PRIV_B2_XSCH:
        ioinst_handle_xsch(cpu, env->regs[1]);
        break;
    case PRIV_B2_CSCH:
        ioinst_handle_csch(cpu, env->regs[1]);
        break;
    case PRIV_B2_HSCH:
        ioinst_handle_hsch(cpu, env->regs[1]);
        break;
    case PRIV_B2_MSCH:
        ioinst_handle_msch(cpu, env->regs[1], run->s390_sieic.ipb);
        break;
    case PRIV_B2_SSCH:
        ioinst_handle_ssch(cpu, env->regs[1], run->s390_sieic.ipb);
        break;
    case PRIV_B2_STCRW:
        ioinst_handle_stcrw(cpu, run->s390_sieic.ipb);
        break;
    case PRIV_B2_STSCH:
        ioinst_handle_stsch(cpu, env->regs[1], run->s390_sieic.ipb);
        break;
    case PRIV_B2_TSCH:
        /* We should only get tsch via KVM_EXIT_S390_TSCH. */
        fprintf(stderr, "Spurious tsch intercept\n");
        break;
    case PRIV_B2_CHSC:
        ioinst_handle_chsc(cpu, run->s390_sieic.ipb);
        break;
    case PRIV_B2_TPI:
        /* This should have been handled by kvm already. */
        fprintf(stderr, "Spurious tpi intercept\n");
        break;
    case PRIV_B2_SCHM:
        ioinst_handle_schm(cpu, env->regs[1], env->regs[2],
                           run->s390_sieic.ipb);
        break;
    case PRIV_B2_RSCH:
        ioinst_handle_rsch(cpu, env->regs[1]);
        break;
    case PRIV_B2_RCHP:
        ioinst_handle_rchp(cpu, env->regs[1]);
        break;
    case PRIV_B2_STCPS:
        /* We do not provide this instruction, it is suppressed. */
        break;
    case PRIV_B2_SAL:
        ioinst_handle_sal(cpu, env->regs[1]);
        break;
    case PRIV_B2_SIGA:
        /* Not provided, set CC = 3 for subchannel not operational */
        setcc(cpu, 3);
        break;
    case PRIV_B2_SCLP_CALL:
        rc = kvm_sclp_service_call(cpu, run, ipbh0);
        break;
    default:
        rc = -1;
        DPRINTF("KVM: unhandled PRIV: 0xb2%x\n", ipa1);
        break;
    }

    return rc;
}

static int handle_b9(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
{
    int r = 0;

    switch (ipa1) {
    case PRIV_B9_EQBS:
        /* just inject exception */
        r = -1;
        break;
    default:
        r = -1;
        DPRINTF("KVM: unhandled PRIV: 0xb9%x\n", ipa1);
        break;
    }

    return r;
}

static int handle_eb(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
{
    int r = 0;

    switch (ipa1) {
    case PRIV_EB_SQBS:
        /* just inject exception */
        r = -1;
        break;
    default:
        r = -1;
        DPRINTF("KVM: unhandled PRIV: 0xeb%x\n", ipa1);
        break;
    }

    return r;
}

static int handle_hypercall(S390CPU *cpu, struct kvm_run *run)
{
    CPUS390XState *env = &cpu->env;
    int ret;

    cpu_synchronize_state(CPU(cpu));
    ret = s390_virtio_hypercall(env);
    if (ret == -EINVAL) {
        enter_pgmcheck(cpu, PGM_SPECIFICATION);
        return 0;
    }

    return ret;
}
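
/*
 * DIAGNOSE 0x308: IPL/reset request from the guest; r1/r3 are taken from
 * the register fields of the intercepted instruction.
 */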
static void kvm_handle_diag_308(S390CPU *cpu, struct kvm_run *run)
{
    uint64_t r1, r3;

    cpu_synchronize_state(CPU(cpu));
    r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    r3 = run->s390_sieic.ipa & 0x000f;
    handle_diag_308(&cpu->env, r1, r3);
}

#define DIAG_KVM_CODE_MASK 0x000000000000ffff

static int handle_diag(S390CPU *cpu, struct kvm_run *run, uint32_t ipb)
{
    int r = 0;
    uint16_t func_code;

    /*
     * For any diagnose call we support, bits 48-63 of the resulting
     * address specify the function code; the remainder is ignored.
     */
    func_code = decode_basedisp_rs(&cpu->env, ipb) & DIAG_KVM_CODE_MASK;
    switch (func_code) {
    case DIAG_IPL:
        kvm_handle_diag_308(cpu, run);
        break;
    case DIAG_KVM_HYPERCALL:
        r = handle_hypercall(cpu, run);
        break;
    case DIAG_KVM_BREAKPOINT:
        sleep(10);
        break;
    default:
        DPRINTF("KVM: unknown DIAG: 0x%x\n", func_code);
        r = -1;
        break;
    }

    return r;
}

static int kvm_s390_cpu_start(S390CPU *cpu)
{
    s390_add_running_cpu(cpu);
    qemu_cpu_kick(CPU(cpu));
    DPRINTF("DONE: KVM cpu start: %p\n", &cpu->env);
    return 0;
}

int kvm_s390_cpu_restart(S390CPU *cpu)
{
    kvm_s390_interrupt(cpu, KVM_S390_RESTART, 0);
    s390_add_running_cpu(cpu);
    qemu_cpu_kick(CPU(cpu));
    DPRINTF("DONE: KVM cpu restart: %p\n", &cpu->env);
    return 0;
}

static void sigp_initial_cpu_reset(void *arg)
{
    CPUState *cpu = arg;
    S390CPUClass *scc = S390_CPU_GET_CLASS(cpu);

    cpu_synchronize_state(cpu);
    scc->initial_cpu_reset(cpu);
}

static void sigp_cpu_reset(void *arg)
{
    CPUState *cpu = arg;
    S390CPUClass *scc = S390_CPU_GET_CLASS(cpu);

    cpu_synchronize_state(cpu);
    scc->cpu_reset(cpu);
}

#define SIGP_ORDER_MASK 0x000000ff

static int handle_sigp(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
{
    CPUS390XState *env = &cpu->env;
    uint8_t order_code;
    uint16_t cpu_addr;
    S390CPU *target_cpu;
    uint64_t *statusreg = &env->regs[ipa1 >> 4];
    int cc;

    cpu_synchronize_state(CPU(cpu));

    /* get order code */
    order_code = decode_basedisp_rs(env, run->s390_sieic.ipb) & SIGP_ORDER_MASK;

    cpu_addr = env->regs[ipa1 & 0x0f];
    target_cpu = s390_cpu_addr2state(cpu_addr);
    if (target_cpu == NULL) {
        cc = 3;    /* not operational */
        goto out;
    }

    switch (order_code) {
    case SIGP_START:
        cc = kvm_s390_cpu_start(target_cpu);
        break;
    case SIGP_RESTART:
        cc = kvm_s390_cpu_restart(target_cpu);
        break;
    case SIGP_SET_ARCH:
        *statusreg &= 0xffffffff00000000UL;
        *statusreg |= SIGP_STAT_INVALID_PARAMETER;
        cc = 1;   /* status stored */
        break;
    case SIGP_INITIAL_CPU_RESET:
        run_on_cpu(CPU(target_cpu), sigp_initial_cpu_reset, CPU(target_cpu));
        cc = 0;
        break;
    case SIGP_CPU_RESET:
        run_on_cpu(CPU(target_cpu), sigp_cpu_reset, CPU(target_cpu));
        cc = 0;
        break;
    default:
        DPRINTF("KVM: unknown SIGP: 0x%x\n", order_code);
        *statusreg &= 0xffffffff00000000UL;
        *statusreg |= SIGP_STAT_INVALID_ORDER;
        cc = 1;   /* status stored */
        break;
    }

out:
    setcc(cpu, cc);
    return 0;
}

static void handle_instruction(S390CPU *cpu, struct kvm_run *run)
{
    unsigned int ipa0 = (run->s390_sieic.ipa & 0xff00);
    uint8_t ipa1 = run->s390_sieic.ipa & 0x00ff;
    int r = -1;

    DPRINTF("handle_instruction 0x%x 0x%x\n",
            run->s390_sieic.ipa, run->s390_sieic.ipb);
    switch (ipa0) {
    case IPA0_B2:
        r = handle_b2(cpu, run, ipa1);
        break;
    case IPA0_B9:
        r = handle_b9(cpu, run, ipa1);
        break;
    case IPA0_EB:
        r = handle_eb(cpu, run, ipa1);
        break;
    case IPA0_DIAG:
        r = handle_diag(cpu, run, run->s390_sieic.ipb);
        break;
    case IPA0_SIGP:
        r = handle_sigp(cpu, run, ipa1);
        break;
    }

    if (r < 0) {
        enter_pgmcheck(cpu, 0x0001);
    }
}

static bool is_special_wait_psw(CPUState *cs)
{
    /* signal quiesce */
    return cs->kvm_run->psw_addr == 0xfffUL;
}

static int handle_intercept(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;
    int icpt_code = run->s390_sieic.icptcode;
    int r = 0;

    DPRINTF("intercept: 0x%x (at 0x%lx)\n", icpt_code,
            (long)cs->kvm_run->psw_addr);
    switch (icpt_code) {
        case ICPT_INSTRUCTION:
            handle_instruction(cpu, run);
            break;
        case ICPT_WAITPSW:
            /* disabled wait, since enabled wait is handled in kernel */
            if (s390_del_running_cpu(cpu) == 0) {
                if (is_special_wait_psw(cs)) {
                    qemu_system_shutdown_request();
                } else {
                    QObject *data;

                    data = qobject_from_jsonf("{ 'action': %s }", "pause");
                    monitor_protocol_event(QEVENT_GUEST_PANICKED, data);
                    qobject_decref(data);
                    vm_stop(RUN_STATE_GUEST_PANICKED);
                }
            }
            r = EXCP_HALTED;
            break;
        case ICPT_CPU_STOP:
            if (s390_del_running_cpu(cpu) == 0) {
                qemu_system_shutdown_request();
            }
            r = EXCP_HALTED;
            break;
        case ICPT_SOFT_INTERCEPT:
            fprintf(stderr, "KVM unimplemented icpt SOFT\n");
            exit(1);
            break;
        case ICPT_IO:
            fprintf(stderr, "KVM unimplemented icpt IO\n");
            exit(1);
            break;
        default:
            fprintf(stderr, "Unknown intercept code: %d\n", icpt_code);
            exit(1);
            break;
    }

    return r;
}

static int handle_tsch(S390CPU *cpu)
{
    CPUS390XState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;
    int ret;

    cpu_synchronize_state(cs);

    ret = ioinst_handle_tsch(env, env->regs[1], run->s390_tsch.ipb);
    if (ret >= 0) {
        /* Success; set condition code. */
        setcc(cpu, ret);
        ret = 0;
    } else if (ret < -1) {
        /*
         * Failure.
         * If an I/O interrupt had been dequeued, we have to reinject it.
         */
        if (run->s390_tsch.dequeued) {
            uint16_t subchannel_id = run->s390_tsch.subchannel_id;
            uint16_t subchannel_nr = run->s390_tsch.subchannel_nr;
            uint32_t io_int_parm = run->s390_tsch.io_int_parm;
            uint32_t io_int_word = run->s390_tsch.io_int_word;
            uint32_t type = ((subchannel_id & 0xff00) << 24) |
                ((subchannel_id & 0x00060) << 22) | (subchannel_nr << 16);

            kvm_s390_interrupt_internal(cpu, type,
                                        ((uint32_t)subchannel_id << 16)
                                        | subchannel_nr,
                                        ((uint64_t)io_int_parm << 32)
                                        | io_int_word, 1);
        }
        ret = 0;
    }
    return ret;
}

int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
    S390CPU *cpu = S390_CPU(cs);
    int ret = 0;

    switch (run->exit_reason) {
        case KVM_EXIT_S390_SIEIC:
            ret = handle_intercept(cpu);
            break;
        case KVM_EXIT_S390_RESET:
            qemu_system_reset_request();
            break;
        case KVM_EXIT_S390_TSCH:
            ret = handle_tsch(cpu);
            break;
        default:
            fprintf(stderr, "Unknown KVM exit: %d\n", run->exit_reason);
            break;
    }

    if (ret == 0) {
        ret = EXCP_INTERRUPT;
    }
    return ret;
}

bool kvm_arch_stop_on_emulation_error(CPUState *cpu)
{
    return true;
}

int kvm_arch_on_sigbus_vcpu(CPUState *cpu, int code, void *addr)
{
    return 1;
}

int kvm_arch_on_sigbus(int code, void *addr)
{
    return 1;
}
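
/*
 * Inject an I/O interrupt for the given subchannel.  The interrupt type
 * encodes the subchannel identification as expected by the
 * KVM_S390_INTERRUPT ioctl.
 */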
void kvm_s390_io_interrupt(S390CPU *cpu, uint16_t subchannel_id,
                           uint16_t subchannel_nr, uint32_t io_int_parm,
                           uint32_t io_int_word)
{
    uint32_t type;

    type = ((subchannel_id & 0xff00) << 24) |
        ((subchannel_id & 0x00060) << 22) | (subchannel_nr << 16);
    kvm_s390_interrupt_internal(cpu, type,
                                ((uint32_t)subchannel_id << 16) | subchannel_nr,
                                ((uint64_t)io_int_parm << 32) | io_int_word, 1);
}

void kvm_s390_crw_mchk(S390CPU *cpu)
{
    kvm_s390_interrupt_internal(cpu, KVM_S390_MCHK, 1 << 28,
                                0x00400f1d40330000, 1);
}

void kvm_s390_enable_css_support(S390CPU *cpu)
{
    struct kvm_enable_cap cap = {};
    int r;

    /* Activate host kernel channel subsystem support. */
    cap.cap = KVM_CAP_S390_CSS_SUPPORT;
    r = kvm_vcpu_ioctl(CPU(cpu), KVM_ENABLE_CAP, &cap);
    assert(r == 0);
}

void kvm_arch_init_irq_routing(KVMState *s)
{
}
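
/*
 * Register (or deregister) an ioeventfd so that guest notifications for
 * virtqueue vq of subchannel sch are signalled through the given notifier.
 */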
int kvm_s390_assign_subch_ioeventfd(EventNotifier *notifier, uint32_t sch,
                                    int vq, bool assign)
{
    struct kvm_ioeventfd kick = {
        .flags = KVM_IOEVENTFD_FLAG_VIRTIO_CCW_NOTIFY |
        KVM_IOEVENTFD_FLAG_DATAMATCH,
        .fd = event_notifier_get_fd(notifier),
        .datamatch = vq,
        .addr = sch,
        .len = 8,
    };
    if (!kvm_check_extension(kvm_state, KVM_CAP_IOEVENTFD)) {
        return -ENOSYS;
    }
    if (!assign) {
        kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }
    return kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
}