Statistics
| Branch: | Revision:

root / target-s390x / helper.c @ 795ca114

History | View | Annotate | Download (22.1 kB)

1
/*
2
 *  S/390 helpers
3
 *
4
 *  Copyright (c) 2009 Ulrich Hecht
5
 *  Copyright (c) 2011 Alexander Graf
6
 *
7
 * This library is free software; you can redistribute it and/or
8
 * modify it under the terms of the GNU Lesser General Public
9
 * License as published by the Free Software Foundation; either
10
 * version 2 of the License, or (at your option) any later version.
11
 *
12
 * This library is distributed in the hope that it will be useful,
13
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15
 * Lesser General Public License for more details.
16
 *
17
 * You should have received a copy of the GNU Lesser General Public
18
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19
 */
20

    
21
#include "cpu.h"
22
#include "exec/gdbstub.h"
23
#include "qemu/timer.h"
24
#ifndef CONFIG_USER_ONLY
25
#include "sysemu/sysemu.h"
26
#endif
27

    
28
//#define DEBUG_S390
29
//#define DEBUG_S390_PTE
30
//#define DEBUG_S390_STDOUT
31

    
32
/*
 * Debug logging helpers.
 *
 * DPRINTF() compiles to nothing unless DEBUG_S390 is defined; with
 * DEBUG_S390_STDOUT it additionally mirrors every message to stderr on
 * top of the qemu log.  PTE_DPRINTF() is for (noisy) page-table-walk
 * tracing and requires DEBUG_S390_PTE in addition to DEBUG_S390.
 */
#ifdef DEBUG_S390
#ifdef DEBUG_S390_STDOUT
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); \
         qemu_log(fmt, ##__VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { qemu_log(fmt, ## __VA_ARGS__); } while (0)
#endif
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#ifdef DEBUG_S390_PTE
#define PTE_DPRINTF DPRINTF
#else
#define PTE_DPRINTF(fmt, ...) \
    do { } while (0)
#endif
52

    
53
#ifndef CONFIG_USER_ONLY
54
void s390x_tod_timer(void *opaque)
55
{
56
    S390CPU *cpu = opaque;
57
    CPUS390XState *env = &cpu->env;
58

    
59
    env->pending_int |= INTERRUPT_TOD;
60
    cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
61
}
62

    
63
void s390x_cpu_timer(void *opaque)
64
{
65
    S390CPU *cpu = opaque;
66
    CPUS390XState *env = &cpu->env;
67

    
68
    env->pending_int |= INTERRUPT_CPUTIMER;
69
    cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
70
}
71
#endif
72

    
73
S390CPU *cpu_s390x_init(const char *cpu_model)
74
{
75
    S390CPU *cpu;
76

    
77
    cpu = S390_CPU(object_new(TYPE_S390_CPU));
78

    
79
    object_property_set_bool(OBJECT(cpu), true, "realized", NULL);
80

    
81
    return cpu;
82
}
83

    
84
#if defined(CONFIG_USER_ONLY)
85

    
86
/*
 * Linux-user variant: there is no interrupt delivery to emulate,
 * so simply clear the pending exception.
 */
void s390_cpu_do_interrupt(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);

    cpu->env.exception_index = -1;
}
93

    
94
/*
 * Linux-user variant: every faulting access becomes an addressing
 * program exception; always reports "fault" (returns 1).
 */
int cpu_s390x_handle_mmu_fault(CPUS390XState *env, target_ulong address,
                               int rw, int mmu_idx)
{
    /* On real machines this value is dropped into LowMem.  Since this
       is userland, simply put this someplace that cpu_loop can find it.  */
    env->__excp_addr = address;
    env->int_pgm_code = PGM_ADDRESSING;
    env->exception_index = EXCP_PGM;
    return 1;
}
104

    
105
#else /* !CONFIG_USER_ONLY */
106

    
107
/* Ensure to exit the TB after this call! */
108
static void trigger_pgm_exception(CPUS390XState *env, uint32_t code,
109
                                  uint32_t ilen)
110
{
111
    env->exception_index = EXCP_PGM;
112
    env->int_pgm_code = code;
113
    env->int_pgm_ilen = ilen;
114
}
115

    
116
/*
 * Map an address-space-control (ASC) mode to the translation-exception
 * identification bits stored alongside the faulting address:
 * 1 = primary, 2 = secondary, 3 = home.  Aborts on any other mode.
 */
static int trans_bits(CPUS390XState *env, uint64_t mode)
{
    switch (mode) {
    case PSW_ASC_PRIMARY:
        return 1;
    case PSW_ASC_SECONDARY:
        return 2;
    case PSW_ASC_HOME:
        return 3;
    default:
        cpu_abort(env, "unknown asc mode\n");
        return 0; /* not reached */
    }
}
137

    
138
static void trigger_prot_fault(CPUS390XState *env, target_ulong vaddr,
139
                               uint64_t mode)
140
{
141
    int ilen = ILEN_LATER_INC;
142
    int bits = trans_bits(env, mode) | 4;
143

    
144
    DPRINTF("%s: vaddr=%016" PRIx64 " bits=%d\n", __func__, vaddr, bits);
145

    
146
    stq_phys(env->psa + offsetof(LowCore, trans_exc_code), vaddr | bits);
147
    trigger_pgm_exception(env, PGM_PROTECTION, ilen);
148
}
149

    
150
static void trigger_page_fault(CPUS390XState *env, target_ulong vaddr,
151
                               uint32_t type, uint64_t asc, int rw)
152
{
153
    int ilen = ILEN_LATER;
154
    int bits = trans_bits(env, asc);
155

    
156
    /* Code accesses have an undefined ilc.  */
157
    if (rw == 2) {
158
        ilen = 2;
159
    }
160

    
161
    DPRINTF("%s: vaddr=%016" PRIx64 " bits=%d\n", __func__, vaddr, bits);
162

    
163
    stq_phys(env->psa + offsetof(LowCore, trans_exc_code), vaddr | bits);
164
    trigger_pgm_exception(env, type, ilen);
165
}
166

    
167
/*
 * Walk one level of the DAT translation tables.
 *
 * @asce:  the region/segment/page-table entry for this level
 * @level: table level being walked (_ASCE_TYPE_* constant; the caller
 *         starts one "fake" level above the real top level so that the
 *         ASCE itself is validated by the same code)
 *
 * Recurses with level - 4 until the segment level is reached, then
 * reads and checks the PTE.  On success stores the translated real
 * address in *raddr (and may clear PAGE_WRITE in *flags for a
 * read-only page) and returns 0; on failure triggers the appropriate
 * program exception and returns -1.
 */
static int mmu_translate_asce(CPUS390XState *env, target_ulong vaddr,
                              uint64_t asc, uint64_t asce, int level,
                              target_ulong *raddr, int *flags, int rw)
{
    uint64_t offs = 0;
    uint64_t origin;
    uint64_t new_asce;

    PTE_DPRINTF("%s: 0x%" PRIx64 "\n", __func__, asce);

    /* An invalid entry at any level ends the walk with a fault. */
    if (((level != _ASCE_TYPE_SEGMENT) && (asce & _REGION_ENTRY_INV)) ||
        ((level == _ASCE_TYPE_SEGMENT) && (asce & _SEGMENT_ENTRY_INV))) {
        /* XXX different regions have different faults */
        DPRINTF("%s: invalid region\n", __func__);
        trigger_page_fault(env, vaddr, PGM_SEGMENT_TRANS, asc, rw);
        return -1;
    }

    /* The entry's type field must match the level we expect to walk. */
    if ((level <= _ASCE_TYPE_MASK) && ((asce & _ASCE_TYPE_MASK) != level)) {
        trigger_page_fault(env, vaddr, PGM_TRANS_SPEC, asc, rw);
        return -1;
    }

    if (asce & _ASCE_REAL_SPACE) {
        /* direct mapping */

        *raddr = vaddr;
        return 0;
    }

    origin = asce & _ASCE_ORIGIN;

    /* Pick the vaddr bits that index the table at this level. */
    switch (level) {
    case _ASCE_TYPE_REGION1 + 4:
        offs = (vaddr >> 50) & 0x3ff8;
        break;
    case _ASCE_TYPE_REGION1:
        offs = (vaddr >> 39) & 0x3ff8;
        break;
    case _ASCE_TYPE_REGION2:
        offs = (vaddr >> 28) & 0x3ff8;
        break;
    case _ASCE_TYPE_REGION3:
        offs = (vaddr >> 17) & 0x3ff8;
        break;
    case _ASCE_TYPE_SEGMENT:
        offs = (vaddr >> 9) & 0x07f8;
        origin = asce & _SEGMENT_ENTRY_ORIGIN;
        break;
    }

    /* XXX region protection flags */
    /* *flags &= ~PAGE_WRITE */

    /* Fetch the next-lower-level entry from guest physical memory. */
    new_asce = ldq_phys(origin + offs);
    PTE_DPRINTF("%s: 0x%" PRIx64 " + 0x%" PRIx64 " => 0x%016" PRIx64 "\n",
                __func__, origin, offs, new_asce);

    if (level != _ASCE_TYPE_SEGMENT) {
        /* yet another region */
        return mmu_translate_asce(env, vaddr, asc, new_asce, level - 4, raddr,
                                  flags, rw);
    }

    /* PTE */
    if (new_asce & _PAGE_INVALID) {
        DPRINTF("%s: PTE=0x%" PRIx64 " invalid\n", __func__, new_asce);
        trigger_page_fault(env, vaddr, PGM_PAGE_TRANS, asc, rw);
        return -1;
    }

    /* Read-only page: drop write permission but keep the mapping. */
    if (new_asce & _PAGE_RO) {
        *flags &= ~PAGE_WRITE;
    }

    *raddr = new_asce & _ASCE_ORIGIN;

    PTE_DPRINTF("%s: PTE=0x%" PRIx64 "\n", __func__, new_asce);

    return 0;
}
248

    
249
/*
 * Translate @vaddr for one address-space-control mode.
 *
 * Selects the ASCE from the control register matching @asc (CR1
 * primary, CR7 secondary, CR13 home), checks that vaddr fits within
 * the address range the table type can describe, then starts the
 * recursive table walk one "fake" level above the ASCE's type so the
 * ASCE itself is validated by mmu_translate_asce().
 *
 * A write (@rw == 1) to a page left without PAGE_WRITE triggers a
 * protection exception.  Returns 0 on success, -1 after triggering a
 * program exception.
 */
static int mmu_translate_asc(CPUS390XState *env, target_ulong vaddr,
                             uint64_t asc, target_ulong *raddr, int *flags,
                             int rw)
{
    uint64_t asce = 0;
    int level, new_level;
    int r;

    switch (asc) {
    case PSW_ASC_PRIMARY:
        PTE_DPRINTF("%s: asc=primary\n", __func__);
        asce = env->cregs[1];
        break;
    case PSW_ASC_SECONDARY:
        PTE_DPRINTF("%s: asc=secondary\n", __func__);
        asce = env->cregs[7];
        break;
    case PSW_ASC_HOME:
        PTE_DPRINTF("%s: asc=home\n", __func__);
        asce = env->cregs[13];
        break;
    }

    /* Fault early if vaddr exceeds what this table type can map. */
    switch (asce & _ASCE_TYPE_MASK) {
    case _ASCE_TYPE_REGION1:
        break;
    case _ASCE_TYPE_REGION2:
        if (vaddr & 0xffe0000000000000ULL) {
            DPRINTF("%s: vaddr doesn't fit 0x%16" PRIx64
                    " 0xffe0000000000000ULL\n", __func__, vaddr);
            trigger_page_fault(env, vaddr, PGM_TRANS_SPEC, asc, rw);
            return -1;
        }
        break;
    case _ASCE_TYPE_REGION3:
        if (vaddr & 0xfffffc0000000000ULL) {
            DPRINTF("%s: vaddr doesn't fit 0x%16" PRIx64
                    " 0xfffffc0000000000ULL\n", __func__, vaddr);
            trigger_page_fault(env, vaddr, PGM_TRANS_SPEC, asc, rw);
            return -1;
        }
        break;
    case _ASCE_TYPE_SEGMENT:
        if (vaddr & 0xffffffff80000000ULL) {
            DPRINTF("%s: vaddr doesn't fit 0x%16" PRIx64
                    " 0xffffffff80000000ULL\n", __func__, vaddr);
            trigger_page_fault(env, vaddr, PGM_TRANS_SPEC, asc, rw);
            return -1;
        }
        break;
    }

    /* fake level above current */
    level = asce & _ASCE_TYPE_MASK;
    new_level = level + 4;
    asce = (asce & ~_ASCE_TYPE_MASK) | (new_level & _ASCE_TYPE_MASK);

    r = mmu_translate_asce(env, vaddr, asc, asce, new_level, raddr, flags, rw);

    /* Stores into pages that lost PAGE_WRITE are protection faults. */
    if ((rw == 1) && !(*flags & PAGE_WRITE)) {
        trigger_prot_fault(env, vaddr, asc);
        return -1;
    }

    return r;
}
315

    
316
/*
 * Translate a virtual address to a guest absolute address.
 *
 * @rw:    0 = read, 1 = write, 2 = instruction fetch
 * @asc:   address-space-control mode (PSW_ASC_*)
 * @raddr: out: translated absolute address
 * @flags: out: PAGE_READ/PAGE_WRITE/PAGE_EXEC permissions
 *
 * With DAT off the address is used as-is.  Afterwards the low-address
 * prefix swap is applied (real -> absolute) and the per-page storage
 * key is updated with reference/change bits according to the access.
 *
 * Returns 0 on success, -1 if a program exception was triggered.
 */
int mmu_translate(CPUS390XState *env, target_ulong vaddr, int rw, uint64_t asc,
                  target_ulong *raddr, int *flags)
{
    int r = -1;
    uint8_t *sk;

    *flags = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    vaddr &= TARGET_PAGE_MASK;

    if (!(env->psw.mask & PSW_MASK_DAT)) {
        /* DAT disabled: identity mapping. */
        *raddr = vaddr;
        r = 0;
        goto out;
    }

    switch (asc) {
    case PSW_ASC_PRIMARY:
    case PSW_ASC_HOME:
        r = mmu_translate_asc(env, vaddr, asc, raddr, flags, rw);
        break;
    case PSW_ASC_SECONDARY:
        /*
         * Instruction: Primary
         * Data: Secondary
         */
        if (rw == 2) {
            r = mmu_translate_asc(env, vaddr, PSW_ASC_PRIMARY, raddr, flags,
                                  rw);
            *flags &= ~(PAGE_READ | PAGE_WRITE);
        } else {
            r = mmu_translate_asc(env, vaddr, PSW_ASC_SECONDARY, raddr, flags,
                                  rw);
            *flags &= ~(PAGE_EXEC);
        }
        break;
    case PSW_ASC_ACCREG:
    default:
        hw_error("guest switched to unknown asc mode\n");
        break;
    }

 out:
    /* Convert real address -> absolute address */
    if (*raddr < 0x2000) {
        *raddr = *raddr + env->psa;
    }

    /*
     * Update the storage key for the page.  Use a strict '<' bound:
     * there is exactly one key per page of ram_size bytes, so
     * *raddr == ram_size would index one element past the end of
     * storage_keys (the previous '<=' check was an off-by-one).
     */
    if (*raddr < ram_size) {
        sk = &env->storage_keys[*raddr / TARGET_PAGE_SIZE];
        if (*flags & PAGE_READ) {
            *sk |= SK_R;
        }

        if (*flags & PAGE_WRITE) {
            *sk |= SK_C;
        }
    }

    return r;
}
376

    
377
/*
 * TLB-fill handler: translate a faulting access and install the
 * mapping in the softmmu TLB.
 *
 * @rw: 0 = read, 1 = write, 2 = instruction fetch
 *
 * Returns 0 when a TLB entry was installed, 1 when the translation
 * ended in a (now pending) program exception.
 */
int cpu_s390x_handle_mmu_fault(CPUS390XState *env, target_ulong orig_vaddr,
                               int rw, int mmu_idx)
{
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;
    target_ulong vaddr, raddr;
    int prot;

    DPRINTF("%s: address 0x%" PRIx64 " rw %d mmu_idx %d\n",
            __func__, orig_vaddr, rw, mmu_idx);

    orig_vaddr &= TARGET_PAGE_MASK;
    vaddr = orig_vaddr;

    /* 31-Bit mode */
    if (!(env->psw.mask & PSW_MASK_64)) {
        vaddr &= 0x7fffffff;
    }

    if (mmu_translate(env, vaddr, rw, asc, &raddr, &prot)) {
        /* Translation ended in exception */
        return 1;
    }

    /* check out of RAM access */
    /* NOTE(review): '>' allows raddr == ram_size + virtio_size through;
       confirm whether the boundary itself is valid backing storage. */
    if (raddr > (ram_size + virtio_size)) {
        DPRINTF("%s: raddr %" PRIx64 " > ram_size %" PRIx64 "\n", __func__,
                (uint64_t)raddr, (uint64_t)ram_size);
        trigger_pgm_exception(env, PGM_ADDRESSING, ILEN_LATER);
        return 1;
    }

    DPRINTF("%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n", __func__,
            (uint64_t)vaddr, (uint64_t)raddr, prot);

    /* Install the mapping under the untruncated virtual address so the
       TLB lookup key matches what the guest actually used. */
    tlb_set_page(env, orig_vaddr, raddr, prot,
                 mmu_idx, TARGET_PAGE_SIZE);

    return 0;
}
416

    
417
/*
 * Debug accessor (gdbstub, monitor): translate a virtual address to a
 * physical one without disturbing guest-visible state.  The current
 * exception_index is saved and restored so a failing translation does
 * not leave a program exception pending.
 *
 * NOTE(review): if mmu_translate() fails, raddr is returned without
 * having been assigned — callers appear to rely only on the success
 * case; confirm before relying on the failure value.
 */
hwaddr s390_cpu_get_phys_page_debug(CPUState *cs, vaddr vaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    target_ulong raddr;
    int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    int old_exc = env->exception_index;
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;

    /* 31-Bit mode */
    if (!(env->psw.mask & PSW_MASK_64)) {
        vaddr &= 0x7fffffff;
    }

    /* rw == 2 (code fetch) avoids setting storage-key change bits. */
    mmu_translate(env, vaddr, 2, asc, &raddr, &prot);
    env->exception_index = old_exc;

    return raddr;
}
436

    
437
/*
 * Load a new PSW (mask + address) into the CPU state.
 *
 * If the new PSW has the wait bit set the CPU is halted.  A wait PSW
 * that is also disabled for I/O, external and machine-check interrupts
 * can never be woken again ("disabled wait"); if this was the last
 * running CPU, a system shutdown is requested.
 */
void load_psw(CPUS390XState *env, uint64_t mask, uint64_t addr)
{
    if (mask & PSW_MASK_WAIT) {
        S390CPU *cpu = s390_env_get_cpu(env);
        CPUState *cs = CPU(cpu);
        if (!(mask & (PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK))) {
            if (s390_del_running_cpu(cpu) == 0) {
/* NOTE(review): this function is already inside the !CONFIG_USER_ONLY
   half of the file, so this inner guard looks redundant — confirm. */
#ifndef CONFIG_USER_ONLY
                qemu_system_shutdown_request();
#endif
            }
        }
        cs->halted = 1;
        env->exception_index = EXCP_HLT;
    }

    env->psw.addr = addr;
    env->psw.mask = mask;
    /* Extract the condition-code field from the new mask. */
    env->cc_op = (mask >> 44) & 3;
}
457

    
458
/*
 * Return the architected PSW mask, with the lazily-computed condition
 * code folded into its CC field.
 */
static uint64_t get_psw_mask(CPUS390XState *env)
{
    uint64_t mask;

    /* Materialize the lazy condition code first. */
    env->cc_op = calc_cc(env, env->cc_op, env->cc_src, env->cc_dst, env->cc_vr);
    assert(!(env->cc_op & ~3));

    mask = env->psw.mask & ~PSW_MASK_CC;
    return mask | ((uint64_t)env->cc_op << 44);
}
471

    
472
/*
 * Map this CPU's lowcore (at its prefix address) for writing.
 * Aborts if the full structure could not be mapped contiguously.
 * Must be paired with cpu_unmap_lowcore().
 */
static LowCore *cpu_map_lowcore(CPUS390XState *env)
{
    hwaddr len = sizeof(LowCore);
    LowCore *lowcore = cpu_physical_memory_map(env->psa, &len, 1);

    if (len < sizeof(LowCore)) {
        cpu_abort(env, "Could not map lowcore\n");
    }

    return lowcore;
}
485

    
486
/* Release a lowcore mapping obtained via cpu_map_lowcore(); the whole
 * structure is marked as written back. */
static void cpu_unmap_lowcore(LowCore *lowcore)
{
    cpu_physical_memory_unmap(lowcore, sizeof(LowCore), 1, sizeof(LowCore));
}
490

    
491
/*
 * Map guest storage honouring the prefix swap: the first 8k of the
 * address space and the 8k block at the prefix address (env->psa) are
 * exchanged.  Accesses to [0, 8k) are redirected to the lowcore at
 * env->psa, and accesses to [psa, psa + 8k) to absolute page 0.  *len
 * is clamped so a single mapping never crosses the swapped boundary.
 */
void *s390_cpu_physical_memory_map(CPUS390XState *env, hwaddr addr, hwaddr *len,
                                   int is_write)
{
    hwaddr start = addr;

    /* Mind the prefix area. */
    if (addr < 8192) {
        /* Map the lowcore. */
        start += env->psa;
        *len = MIN(*len, 8192 - addr);
    } else if ((addr >= env->psa) && (addr < env->psa + 8192)) {
        /* Map the 0 page. */
        start -= env->psa;
        *len = MIN(*len, 8192 - start);
    }

    return cpu_physical_memory_map(start, len, is_write);
}
509

    
510
/* Counterpart of s390_cpu_physical_memory_map(); the entire mapped
 * length is reported as accessed. */
void s390_cpu_physical_memory_unmap(CPUS390XState *env, void *addr, hwaddr len,
                                    int is_write)
{
    cpu_physical_memory_unmap(addr, len, is_write, len);
}
515

    
516
/*
 * Deliver a supervisor-call (SVC) interrupt: store the SVC code,
 * instruction length and old PSW into the lowcore, then load the SVC
 * new PSW.  The old PSW address points past the SVC instruction.
 */
static void do_svc_interrupt(CPUS390XState *env)
{
    LowCore *lowcore = cpu_map_lowcore(env);
    uint64_t new_mask, new_addr;

    lowcore->svc_code = cpu_to_be16(env->int_svc_code);
    lowcore->svc_ilen = cpu_to_be16(env->int_svc_ilen);
    lowcore->svc_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->svc_old_psw.addr = cpu_to_be64(env->psw.addr + env->int_svc_ilen);
    new_mask = be64_to_cpu(lowcore->svc_new_psw.mask);
    new_addr = be64_to_cpu(lowcore->svc_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, new_mask, new_addr);
}
534

    
535
/*
 * Deliver a program interrupt: resolve the instruction length where it
 * was deferred at trigger time, store code/ilen/old PSW into the
 * lowcore, and load the program new PSW.
 */
static void do_program_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;
    int ilen = env->int_pgm_ilen;

    switch (ilen) {
    case ILEN_LATER:
        /* Length deferred: derive it from the opcode at the PSW. */
        ilen = get_ilen(cpu_ldub_code(env, env->psw.addr));
        break;
    case ILEN_LATER_INC:
        /* As above, but the PSW must also advance past the insn
           (nullifying vs. suppressing exception semantics). */
        ilen = get_ilen(cpu_ldub_code(env, env->psw.addr));
        env->psw.addr += ilen;
        break;
    default:
        assert(ilen == 2 || ilen == 4 || ilen == 6);
    }

    qemu_log_mask(CPU_LOG_INT, "%s: code=0x%x ilen=%d\n",
                  __func__, env->int_pgm_code, ilen);

    lowcore = cpu_map_lowcore(env);

    lowcore->pgm_ilen = cpu_to_be16(ilen);
    lowcore->pgm_code = cpu_to_be16(env->int_pgm_code);
    lowcore->program_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->program_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->program_new_psw.mask);
    addr = be64_to_cpu(lowcore->program_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    DPRINTF("%s: %x %x %" PRIx64 " %" PRIx64 "\n", __func__,
            env->int_pgm_code, ilen, env->psw.mask,
            env->psw.addr);

    load_psw(env, mask, addr);
}
573

    
574
#define VIRTIO_SUBCODE_64 0x0D00
575

    
576
/*
 * Deliver the most recently queued external interrupt: store its code
 * and parameters plus the old PSW into the lowcore, pop the queue, and
 * load the external new PSW.  Clears the pending flag when the queue
 * becomes empty.
 */
static void do_ext_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;
    ExtQueue *q;

    if (!(env->psw.mask & PSW_MASK_EXT)) {
        cpu_abort(env, "Ext int w/o ext mask\n");
    }

    /* NOTE(review): '>' lets ext_index == MAX_EXT_QUEUE through —
       confirm against the declared size of env->ext_queue. */
    if (env->ext_index < 0 || env->ext_index > MAX_EXT_QUEUE) {
        cpu_abort(env, "Ext queue overrun: %d\n", env->ext_index);
    }

    q = &env->ext_queue[env->ext_index];
    lowcore = cpu_map_lowcore(env);

    lowcore->ext_int_code = cpu_to_be16(q->code);
    lowcore->ext_params = cpu_to_be32(q->param);
    lowcore->ext_params2 = cpu_to_be64(q->param64);
    lowcore->external_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr);
    lowcore->cpu_addr = cpu_to_be16(env->cpu_num | VIRTIO_SUBCODE_64);
    mask = be64_to_cpu(lowcore->external_new_psw.mask);
    addr = be64_to_cpu(lowcore->external_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    /* Pop the queue; no more entries means nothing left pending. */
    env->ext_index--;
    if (env->ext_index == -1) {
        env->pending_int &= ~INTERRUPT_EXT;
    }

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
            env->psw.mask, env->psw.addr);

    load_psw(env, mask, addr);
}
614

    
615
/*
 * Deliver at most one pending I/O interrupt.
 *
 * Scans all interruption subclasses (ISCs); for the first queued
 * interrupt whose ISC is enabled in CR6, the subchannel information and
 * old PSW are stored into the lowcore, the entry is popped, and the
 * I/O new PSW is loaded.  INTERRUPT_IO is cleared only when no queue
 * holds any further deliverable or masked-pending entries.
 */
static void do_io_interrupt(CPUS390XState *env)
{
    LowCore *lowcore;
    IOIntQueue *q;
    uint8_t isc;
    int disable = 1;
    int found = 0;

    if (!(env->psw.mask & PSW_MASK_IO)) {
        cpu_abort(env, "I/O int w/o I/O mask\n");
    }

    for (isc = 0; isc < ARRAY_SIZE(env->io_index); isc++) {
        uint64_t isc_bits;

        /* Empty queue for this subclass. */
        if (env->io_index[isc] < 0) {
            continue;
        }
        if (env->io_index[isc] > MAX_IO_QUEUE) {
            cpu_abort(env, "I/O queue overrun for isc %d: %d\n",
                      isc, env->io_index[isc]);
        }

        q = &env->io_queue[env->io_index[isc]][isc];
        isc_bits = ISC_TO_ISC_BITS(IO_INT_WORD_ISC(q->word));
        /* Subclass masked off in CR6: keep it pending. */
        if (!(env->cregs[6] & isc_bits)) {
            disable = 0;
            continue;
        }
        if (!found) {
            uint64_t mask, addr;

            found = 1;
            lowcore = cpu_map_lowcore(env);

            lowcore->subchannel_id = cpu_to_be16(q->id);
            lowcore->subchannel_nr = cpu_to_be16(q->nr);
            lowcore->io_int_parm = cpu_to_be32(q->parm);
            lowcore->io_int_word = cpu_to_be32(q->word);
            lowcore->io_old_psw.mask = cpu_to_be64(get_psw_mask(env));
            lowcore->io_old_psw.addr = cpu_to_be64(env->psw.addr);
            mask = be64_to_cpu(lowcore->io_new_psw.mask);
            addr = be64_to_cpu(lowcore->io_new_psw.addr);

            cpu_unmap_lowcore(lowcore);

            env->io_index[isc]--;

            DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
                    env->psw.mask, env->psw.addr);
            load_psw(env, mask, addr);
        }
        /* Anything still queued for this subclass keeps IO pending. */
        if (env->io_index[isc] >= 0) {
            disable = 0;
        }
        continue;
    }

    if (disable) {
        env->pending_int &= ~INTERRUPT_IO;
    }

}
678

    
679
/*
 * Deliver a channel-report-word (CRW) machine-check interrupt: save the
 * full architected register state into the lowcore save areas, store
 * the machine-check interruption code and old PSW, pop the queue, and
 * load the machine-check new PSW.
 */
static void do_mchk_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;
    MchkQueue *q;
    int i;

    if (!(env->psw.mask & PSW_MASK_MCHECK)) {
        cpu_abort(env, "Machine check w/o mchk mask\n");
    }

    /* NOTE(review): '>' lets mchk_index == MAX_MCHK_QUEUE through —
       confirm against the declared size of env->mchk_queue. */
    if (env->mchk_index < 0 || env->mchk_index > MAX_MCHK_QUEUE) {
        cpu_abort(env, "Mchk queue overrun: %d\n", env->mchk_index);
    }

    q = &env->mchk_queue[env->mchk_index];

    /* Only type 1 (CRW pending) machine checks are implemented. */
    if (q->type != 1) {
        /* Don't know how to handle this... */
        cpu_abort(env, "Unknown machine check type %d\n", q->type);
    }
    if (!(env->cregs[14] & (1 << 28))) {
        /* CRW machine checks disabled */
        return;
    }

    lowcore = cpu_map_lowcore(env);

    /* Save the complete register state into the lowcore save areas. */
    for (i = 0; i < 16; i++) {
        lowcore->floating_pt_save_area[i] = cpu_to_be64(env->fregs[i].ll);
        lowcore->gpregs_save_area[i] = cpu_to_be64(env->regs[i]);
        lowcore->access_regs_save_area[i] = cpu_to_be32(env->aregs[i]);
        lowcore->cregs_save_area[i] = cpu_to_be64(env->cregs[i]);
    }
    lowcore->prefixreg_save_area = cpu_to_be32(env->psa);
    lowcore->fpt_creg_save_area = cpu_to_be32(env->fpc);
    lowcore->tod_progreg_save_area = cpu_to_be32(env->todpr);
    lowcore->cpu_timer_save_area[0] = cpu_to_be32(env->cputm >> 32);
    lowcore->cpu_timer_save_area[1] = cpu_to_be32((uint32_t)env->cputm);
    lowcore->clock_comp_save_area[0] = cpu_to_be32(env->ckc >> 32);
    lowcore->clock_comp_save_area[1] = cpu_to_be32((uint32_t)env->ckc);

    /* Fixed interruption code announcing a CRW-pending condition. */
    lowcore->mcck_interruption_code[0] = cpu_to_be32(0x00400f1d);
    lowcore->mcck_interruption_code[1] = cpu_to_be32(0x40330000);
    lowcore->mcck_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->mcck_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->mcck_new_psw.mask);
    addr = be64_to_cpu(lowcore->mcck_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    /* Pop the queue; empty queue clears the pending flag. */
    env->mchk_index--;
    if (env->mchk_index == -1) {
        env->pending_int &= ~INTERRUPT_MCHK;
    }

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
            env->psw.mask, env->psw.addr);

    load_psw(env, mask, addr);
}
740

    
741
/*
 * Top-level interrupt dispatcher (system emulation).
 *
 * If no exception is already pending (exception_index == -1), picks the
 * highest-priority deliverable interrupt class — machine check, then
 * external, then I/O — subject to the corresponding PSW mask bits, and
 * delivers it via the matching do_*_interrupt() helper.  De-asserts
 * CPU_INTERRUPT_HARD once nothing is pending anymore.
 */
void s390_cpu_do_interrupt(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    qemu_log_mask(CPU_LOG_INT, "%s: %d at pc=%" PRIx64 "\n",
                  __func__, env->exception_index, env->psw.addr);

    s390_add_running_cpu(cpu);
    /* handle machine checks */
    if ((env->psw.mask & PSW_MASK_MCHECK) &&
        (env->exception_index == -1)) {
        if (env->pending_int & INTERRUPT_MCHK) {
            env->exception_index = EXCP_MCHK;
        }
    }
    /* handle external interrupts */
    if ((env->psw.mask & PSW_MASK_EXT) &&
        env->exception_index == -1) {
        if (env->pending_int & INTERRUPT_EXT) {
            /* code is already in env */
            env->exception_index = EXCP_EXT;
        } else if (env->pending_int & INTERRUPT_TOD) {
            /* Clock comparator: inject ext interrupt code 0x1004. */
            cpu_inject_ext(cpu, 0x1004, 0, 0);
            env->exception_index = EXCP_EXT;
            env->pending_int &= ~INTERRUPT_EXT;
            env->pending_int &= ~INTERRUPT_TOD;
        } else if (env->pending_int & INTERRUPT_CPUTIMER) {
            /* CPU timer: inject ext interrupt code 0x1005. */
            cpu_inject_ext(cpu, 0x1005, 0, 0);
            env->exception_index = EXCP_EXT;
            env->pending_int &= ~INTERRUPT_EXT;
            /*
             * Fix: this branch must clear the CPU-timer flag it just
             * consumed.  It previously cleared INTERRUPT_TOD (copy-paste
             * from the branch above), leaving INTERRUPT_CPUTIMER set
             * forever and re-injecting the interrupt on every pass.
             */
            env->pending_int &= ~INTERRUPT_CPUTIMER;
        }
    }
    /* handle I/O interrupts */
    if ((env->psw.mask & PSW_MASK_IO) &&
        (env->exception_index == -1)) {
        if (env->pending_int & INTERRUPT_IO) {
            env->exception_index = EXCP_IO;
        }
    }

    switch (env->exception_index) {
    case EXCP_PGM:
        do_program_interrupt(env);
        break;
    case EXCP_SVC:
        do_svc_interrupt(env);
        break;
    case EXCP_EXT:
        do_ext_interrupt(env);
        break;
    case EXCP_IO:
        do_io_interrupt(env);
        break;
    case EXCP_MCHK:
        do_mchk_interrupt(env);
        break;
    }
    env->exception_index = -1;

    if (!env->pending_int) {
        cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
    }
}
806

    
807
#endif /* CONFIG_USER_ONLY */