/*
 *  S/390 helpers
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2011 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "cpu.h"
#include "exec-all.h"
#include "gdbstub.h"
#include "qemu-common.h"
#include "qemu-timer.h"

#if !defined(CONFIG_USER_ONLY)
#include <linux/kvm.h>
#include "kvm.h"
#endif

//#define DEBUG_S390
//#define DEBUG_S390_PTE
//#define DEBUG_S390_STDOUT

#ifdef DEBUG_S390
#ifdef DEBUG_S390_STDOUT
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); \
         qemu_log(fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { qemu_log(fmt, ## __VA_ARGS__); } while (0)
#endif
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#ifdef DEBUG_S390_PTE
#define PTE_DPRINTF DPRINTF
#else
#define PTE_DPRINTF(fmt, ...) \
    do { } while (0)
#endif

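/*
 * Timer callbacks: when the TOD timer or the CPU timer created in
 * cpu_s390x_init() fires, mark the corresponding external interrupt as
 * pending and kick the CPU.  The interrupt itself is delivered later by
 * do_interrupt() below.
 */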
#ifndef CONFIG_USER_ONLY
static void s390x_tod_timer(void *opaque)
{
    CPUState *env = opaque;

    env->pending_int |= INTERRUPT_TOD;
    cpu_interrupt(env, CPU_INTERRUPT_HARD);
}

static void s390x_cpu_timer(void *opaque)
{
    CPUState *env = opaque;

    env->pending_int |= INTERRUPT_CPUTIMER;
    cpu_interrupt(env, CPU_INTERRUPT_HARD);
}
#endif

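/*
 * CPU creation: allocate the state, initialize the translator once, derive
 * the guest TOD clock offset from the host wall clock (TOD_UNIX_EPOCH covers
 * the 1900 vs. 1970 epoch difference), create the TOD and CPU timers for
 * system emulation, and reset the new CPU.
 */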
CPUS390XState *cpu_s390x_init(const char *cpu_model)
{
    CPUS390XState *env;
#if !defined (CONFIG_USER_ONLY)
    struct tm tm;
#endif
    static int inited = 0;
    static int cpu_num = 0;

    env = qemu_mallocz(sizeof(CPUS390XState));
    cpu_exec_init(env);
    if (!inited) {
        inited = 1;
        s390x_translate_init();
    }

#if !defined(CONFIG_USER_ONLY)
    qemu_get_timedate(&tm, 0);
    env->tod_offset = TOD_UNIX_EPOCH +
                      (time2tod(mktimegm(&tm)) * 1000000000ULL);
    env->tod_basetime = 0;
    env->tod_timer = qemu_new_timer_ns(vm_clock, s390x_tod_timer, env);
    env->cpu_timer = qemu_new_timer_ns(vm_clock, s390x_cpu_timer, env);
#endif
    env->cpu_model_str = cpu_model;
    env->cpu_num = cpu_num++;
    env->ext_index = -1;
    cpu_reset(env);
    qemu_init_vcpu(env);
    return env;
}

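/*
 * In user-only (linux-user) mode there is no DAT translation; a fault is
 * simply reported back to the caller as an addressing exception (EXCP_ADDR).
 */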
#if defined(CONFIG_USER_ONLY)

void do_interrupt (CPUState *env)
{
    env->exception_index = -1;
}

int cpu_s390x_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
                                int mmu_idx, int is_softmmu)
{
    /* fprintf(stderr,"%s: address 0x%lx rw %d mmu_idx %d is_softmmu %d\n",
            __FUNCTION__, address, rw, mmu_idx, is_softmmu); */
    env->exception_index = EXCP_ADDR;
    env->__excp_addr = address; /* FIXME: find out how this works on a real machine */
    return 1;
}

#endif /* CONFIG_USER_ONLY */

void cpu_reset(CPUS390XState *env)
{
    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
        log_cpu_state(env, 0);
    }

    memset(env, 0, offsetof(CPUS390XState, breakpoints));
    /* FIXME: reset vector? */
    tlb_flush(env, 1);
}

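/*
 * Everything below implements full system emulation: dynamic address
 * translation (DAT), PSW handling and interrupt delivery through the
 * lowcore at the prefix area (env->psa).
 */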
#ifndef CONFIG_USER_ONLY

/* Be sure to exit the TB after this call! */
static void trigger_pgm_exception(CPUState *env, uint32_t code, uint32_t ilc)
{
    env->exception_index = EXCP_PGM;
    env->int_pgm_code = code;
    env->int_pgm_ilc = ilc;
}

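/*
 * Translation-exception identification: the low bits stored alongside the
 * failing address in the lowcore encode which address space was in use
 * (1 = primary, 2 = secondary, 3 = home); trigger_prot_fault() additionally
 * sets bit 2 to mark a protection exception.
 */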
static int trans_bits(CPUState *env, uint64_t mode)
{
    int bits = 0;

    switch (mode) {
    case PSW_ASC_PRIMARY:
        bits = 1;
        break;
    case PSW_ASC_SECONDARY:
        bits = 2;
        break;
    case PSW_ASC_HOME:
        bits = 3;
        break;
    default:
        cpu_abort(env, "unknown asc mode\n");
        break;
    }

    return bits;
}

static void trigger_prot_fault(CPUState *env, target_ulong vaddr, uint64_t mode)
{
    int ilc = ILC_LATER_INC_2;
    int bits = trans_bits(env, mode) | 4;

    DPRINTF("%s: vaddr=%016" PRIx64 " bits=%d\n", __FUNCTION__, vaddr, bits);

    stq_phys(env->psa + offsetof(LowCore, trans_exc_code), vaddr | bits);
    trigger_pgm_exception(env, PGM_PROTECTION, ilc);
}

static void trigger_page_fault(CPUState *env, target_ulong vaddr, uint32_t type,
                               uint64_t asc, int rw)
{
    int ilc = ILC_LATER;
    int bits = trans_bits(env, asc);

    if (rw == 2) {
        /* instruction fetches have an undefined ilc */
        ilc = 2;
    }

    DPRINTF("%s: vaddr=%016" PRIx64 " bits=%d\n", __FUNCTION__, vaddr, bits);

    stq_phys(env->psa + offsetof(LowCore, trans_exc_code), vaddr | bits);
    trigger_pgm_exception(env, type, ilc);
}

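/*
 * One step of the DAT table walk.  "asce" is the current table entry (the
 * address-space-control element itself on the first call) and "level" says
 * which table it designates.  A slice of the virtual address selects an
 * 8-byte entry in that table, e.g. at segment level
 *     offs = (vaddr >> 9) & 0x7f8  ==  ((vaddr >> 12) & 0xff) * 8
 * picks the page-table entry.  The function recurses with level - 4 until it
 * reaches the PTE; invalid entries raise segment/page translation
 * exceptions, and a read-only PTE clears PAGE_WRITE in *flags.
 */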
static int mmu_translate_asce(CPUState *env, target_ulong vaddr, uint64_t asc,
                              uint64_t asce, int level, target_ulong *raddr,
                              int *flags, int rw)
{
    uint64_t offs = 0;
    uint64_t origin;
    uint64_t new_asce;

    PTE_DPRINTF("%s: 0x%" PRIx64 "\n", __FUNCTION__, asce);

    if (((level != _ASCE_TYPE_SEGMENT) && (asce & _REGION_ENTRY_INV)) ||
        ((level == _ASCE_TYPE_SEGMENT) && (asce & _SEGMENT_ENTRY_INV))) {
        /* XXX different regions have different faults */
        DPRINTF("%s: invalid region\n", __FUNCTION__);
        trigger_page_fault(env, vaddr, PGM_SEGMENT_TRANS, asc, rw);
        return -1;
    }

    if ((level <= _ASCE_TYPE_MASK) && ((asce & _ASCE_TYPE_MASK) != level)) {
        trigger_page_fault(env, vaddr, PGM_TRANS_SPEC, asc, rw);
        return -1;
    }

    if (asce & _ASCE_REAL_SPACE) {
        /* direct mapping */

        *raddr = vaddr;
        return 0;
    }

    origin = asce & _ASCE_ORIGIN;

    switch (level) {
    case _ASCE_TYPE_REGION1 + 4:
        offs = (vaddr >> 50) & 0x3ff8;
        break;
    case _ASCE_TYPE_REGION1:
        offs = (vaddr >> 39) & 0x3ff8;
        break;
    case _ASCE_TYPE_REGION2:
        offs = (vaddr >> 28) & 0x3ff8;
        break;
    case _ASCE_TYPE_REGION3:
        offs = (vaddr >> 17) & 0x3ff8;
        break;
    case _ASCE_TYPE_SEGMENT:
        offs = (vaddr >> 9) & 0x07f8;
        origin = asce & _SEGMENT_ENTRY_ORIGIN;
        break;
    }

    /* XXX region protection flags */
    /* *flags &= ~PAGE_WRITE */

    new_asce = ldq_phys(origin + offs);
    PTE_DPRINTF("%s: 0x%" PRIx64 " + 0x%" PRIx64 " => 0x%016" PRIx64 "\n",
                __FUNCTION__, origin, offs, new_asce);

    if (level != _ASCE_TYPE_SEGMENT) {
        /* yet another region */
        return mmu_translate_asce(env, vaddr, asc, new_asce, level - 4, raddr,
                                  flags, rw);
    }

    /* PTE */
    if (new_asce & _PAGE_INVALID) {
        DPRINTF("%s: PTE=0x%" PRIx64 " invalid\n", __FUNCTION__, new_asce);
        trigger_page_fault(env, vaddr, PGM_PAGE_TRANS, asc, rw);
        return -1;
    }

    if (new_asce & _PAGE_RO) {
        *flags &= ~PAGE_WRITE;
    }

    *raddr = new_asce & _ASCE_ORIGIN;

    PTE_DPRINTF("%s: PTE=0x%" PRIx64 "\n", __FUNCTION__, new_asce);

    return 0;
}

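/*
 * Translate one address space: pick the ASCE from CR1 (primary), CR7
 * (secondary) or CR13 (home), reject addresses that exceed the reach of the
 * configured top-level table, then start the table walk with a level one
 * step above the ASCE's own type so mmu_translate_asce() can treat the ASCE
 * like any other table entry.  A write to a read-only page ends in a
 * protection exception.
 */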
static int mmu_translate_asc(CPUState *env, target_ulong vaddr, uint64_t asc,
                             target_ulong *raddr, int *flags, int rw)
{
    uint64_t asce = 0;
    int level, new_level;
    int r;

    switch (asc) {
    case PSW_ASC_PRIMARY:
        PTE_DPRINTF("%s: asc=primary\n", __FUNCTION__);
        asce = env->cregs[1];
        break;
    case PSW_ASC_SECONDARY:
        PTE_DPRINTF("%s: asc=secondary\n", __FUNCTION__);
        asce = env->cregs[7];
        break;
    case PSW_ASC_HOME:
        PTE_DPRINTF("%s: asc=home\n", __FUNCTION__);
        asce = env->cregs[13];
        break;
    }

    switch (asce & _ASCE_TYPE_MASK) {
    case _ASCE_TYPE_REGION1:
        break;
    case _ASCE_TYPE_REGION2:
        if (vaddr & 0xffe0000000000000ULL) {
            DPRINTF("%s: vaddr doesn't fit 0x%16" PRIx64
                        " 0xffe0000000000000ULL\n", __FUNCTION__,
                        vaddr);
            trigger_page_fault(env, vaddr, PGM_TRANS_SPEC, asc, rw);
            return -1;
        }
        break;
    case _ASCE_TYPE_REGION3:
        if (vaddr & 0xfffffc0000000000ULL) {
            DPRINTF("%s: vaddr doesn't fit 0x%16" PRIx64
                        " 0xfffffc0000000000ULL\n", __FUNCTION__,
                        vaddr);
            trigger_page_fault(env, vaddr, PGM_TRANS_SPEC, asc, rw);
            return -1;
        }
        break;
    case _ASCE_TYPE_SEGMENT:
        if (vaddr & 0xffffffff80000000ULL) {
            DPRINTF("%s: vaddr doesn't fit 0x%16" PRIx64
                        " 0xffffffff80000000ULL\n", __FUNCTION__,
                        vaddr);
            trigger_page_fault(env, vaddr, PGM_TRANS_SPEC, asc, rw);
            return -1;
        }
        break;
    }

    /* fake level above current */
    level = asce & _ASCE_TYPE_MASK;
    new_level = level + 4;
    asce = (asce & ~_ASCE_TYPE_MASK) | (new_level & _ASCE_TYPE_MASK);

    r = mmu_translate_asce(env, vaddr, asc, asce, new_level, raddr, flags, rw);

    if ((rw == 1) && !(*flags & PAGE_WRITE)) {
        trigger_prot_fault(env, vaddr, asc);
        return -1;
    }

    return r;
}

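/*
 * Top-level translation entry point.  With DAT disabled the address is used
 * as-is.  Primary and home space translate directly; in secondary mode,
 * instruction fetches (rw == 2) use the primary space and data accesses the
 * secondary space, with the page flags restricted accordingly.
 * Access-register mode is not implemented.  Finally, real addresses in the
 * first 8KB are prefixed, i.e. redirected to the area at env->psa.
 */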
int mmu_translate(CPUState *env, target_ulong vaddr, int rw, uint64_t asc,
                  target_ulong *raddr, int *flags)
{
    int r = -1;

    *flags = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    vaddr &= TARGET_PAGE_MASK;

    if (!(env->psw.mask & PSW_MASK_DAT)) {
        *raddr = vaddr;
        r = 0;
        goto out;
    }

    switch (asc) {
    case PSW_ASC_PRIMARY:
    case PSW_ASC_HOME:
        r = mmu_translate_asc(env, vaddr, asc, raddr, flags, rw);
        break;
    case PSW_ASC_SECONDARY:
        /*
         * Instruction: Primary
         * Data: Secondary
         */
        if (rw == 2) {
            r = mmu_translate_asc(env, vaddr, PSW_ASC_PRIMARY, raddr, flags,
                                  rw);
            *flags &= ~(PAGE_READ | PAGE_WRITE);
        } else {
            r = mmu_translate_asc(env, vaddr, PSW_ASC_SECONDARY, raddr, flags,
                                  rw);
            *flags &= ~(PAGE_EXEC);
        }
        break;
    case PSW_ASC_ACCREG:
    default:
        hw_error("guest switched to unknown asc mode\n");
        break;
    }

out:
    /* Convert real address -> absolute address */
    if (*raddr < 0x2000) {
        *raddr = *raddr + env->psa;
    }

    return r;
}

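/*
 * Softmmu fault handler: translate the page, make sure the result lies
 * within guest RAM (plus the virtio area), and install a TLB entry for the
 * original virtual address.  Returns 1 if a program exception has been
 * queued instead.
 */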
int cpu_s390x_handle_mmu_fault (CPUState *env, target_ulong _vaddr, int rw,
                                int mmu_idx, int is_softmmu)
{
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;
    target_ulong vaddr, raddr;
    int prot;

    DPRINTF("%s: address 0x%" PRIx64 " rw %d mmu_idx %d is_softmmu %d\n",
            __FUNCTION__, _vaddr, rw, mmu_idx, is_softmmu);

    _vaddr &= TARGET_PAGE_MASK;
    vaddr = _vaddr;

    /* 31-Bit mode */
    if (!(env->psw.mask & PSW_MASK_64)) {
        vaddr &= 0x7fffffff;
    }

    if (mmu_translate(env, vaddr, rw, asc, &raddr, &prot)) {
        /* Translation ended in exception */
        return 1;
    }

    /* check out of RAM access */
    if (raddr > (ram_size + virtio_size)) {
        DPRINTF("%s: raddr %" PRIx64 " > ram_size %" PRIx64 "\n", __FUNCTION__,
                (uint64_t)raddr, (uint64_t)ram_size);
        trigger_pgm_exception(env, PGM_ADDRESSING, ILC_LATER);
        return 1;
    }

    DPRINTF("%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n", __FUNCTION__,
            (uint64_t)vaddr, (uint64_t)raddr, prot);

    tlb_set_page(env, _vaddr, raddr, prot,
                 mmu_idx, TARGET_PAGE_SIZE);

    return 0;
}

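/*
 * Debug (gdbstub/monitor) address translation: translate and restore
 * exception_index afterwards so that a failed lookup does not leave a
 * program exception pending for the guest.
 */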
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong vaddr)
{
    target_ulong raddr;
    int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    int old_exc = env->exception_index;
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;

    /* 31-Bit mode */
    if (!(env->psw.mask & PSW_MASK_64)) {
        vaddr &= 0x7fffffff;
    }

    mmu_translate(env, vaddr, 2, asc, &raddr, &prot);
    env->exception_index = old_exc;

    return raddr;
}

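/*
 * PSW handling.  load_psw() installs a new PSW mask and address and pulls
 * the condition code out of the mask into the lazily evaluated cc_op; a
 * wait-state PSW halts the CPU.  get_psw_mask() is the inverse: it forces
 * the condition code to be computed and folds it back into the mask so the
 * full PSW can be stored into the lowcore.
 */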
void load_psw(CPUState *env, uint64_t mask, uint64_t addr)
{
    if (mask & PSW_MASK_WAIT) {
        env->halted = 1;
        env->exception_index = EXCP_HLT;
        if (!(mask & (PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK))) {
            /* XXX disabled wait state - CPU is dead */
        }
    }

    env->psw.addr = addr;
    env->psw.mask = mask;
    env->cc_op = (mask >> 13) & 3;
}

static uint64_t get_psw_mask(CPUState *env)
{
    uint64_t r = env->psw.mask;

    env->cc_op = calc_cc(env, env->cc_op, env->cc_src, env->cc_dst, env->cc_vr);

    r &= ~(3ULL << 13);
    assert(!(env->cc_op & ~3));
    r |= env->cc_op << 13;

    return r;
}

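/*
 * Interrupt delivery follows the same pattern for every class: map the
 * lowcore at the prefix (env->psa), store the current PSW as the "old" PSW
 * together with the interruption parameters, and load the corresponding
 * "new" PSW from the lowcore.  For supervisor calls the old PSW must point
 * past the SVC instruction, hence the int_svc_ilc adjustment below.
 */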
static void do_svc_interrupt(CPUState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;
    target_phys_addr_t len = TARGET_PAGE_SIZE;

    lowcore = cpu_physical_memory_map(env->psa, &len, 1);

    lowcore->svc_code = cpu_to_be16(env->int_svc_code);
    lowcore->svc_ilc = cpu_to_be16(env->int_svc_ilc);
    lowcore->svc_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->svc_old_psw.addr = cpu_to_be64(env->psw.addr + (env->int_svc_ilc));
    mask = be64_to_cpu(lowcore->svc_new_psw.mask);
    addr = be64_to_cpu(lowcore->svc_new_psw.addr);

    cpu_physical_memory_unmap(lowcore, len, 1, len);

    load_psw(env, mask, addr);
}

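/*
 * Program interrupts: the instruction length code may have been deferred at
 * trigger time (ILC_LATER*), in which case it is derived here from the
 * opcode at the current PSW address via get_ilc(), optionally advancing the
 * PSW past the faulting instruction before the old PSW is stored.
 */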
static void do_program_interrupt(CPUState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;
    target_phys_addr_t len = TARGET_PAGE_SIZE;
    int ilc = env->int_pgm_ilc;

    switch (ilc) {
    case ILC_LATER:
        ilc = get_ilc(ldub_code(env->psw.addr));
        break;
    case ILC_LATER_INC:
        ilc = get_ilc(ldub_code(env->psw.addr));
        env->psw.addr += ilc * 2;
        break;
    case ILC_LATER_INC_2:
        ilc = get_ilc(ldub_code(env->psw.addr)) * 2;
        env->psw.addr += ilc;
        break;
    }

    qemu_log("%s: code=0x%x ilc=%d\n", __FUNCTION__, env->int_pgm_code, ilc);

    lowcore = cpu_physical_memory_map(env->psa, &len, 1);

    lowcore->pgm_ilc = cpu_to_be16(ilc);
    lowcore->pgm_code = cpu_to_be16(env->int_pgm_code);
    lowcore->program_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->program_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->program_new_psw.mask);
    addr = be64_to_cpu(lowcore->program_new_psw.addr);

    cpu_physical_memory_unmap(lowcore, len, 1, len);

    DPRINTF("%s: %x %x %" PRIx64 " %" PRIx64 "\n", __FUNCTION__,
            env->int_pgm_code, ilc, env->psw.mask,
            env->psw.addr);

    load_psw(env, mask, addr);
}

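/*
 * External interrupts are taken from the per-CPU ext_queue: the code and
 * parameters are stored into the lowcore, the old/new PSW swap is performed,
 * and the queue is popped.  The cpu_addr field is tagged with
 * VIRTIO_SUBCODE_64, a convention of the s390-virtio machine (assumption
 * based on the constant's name).
 */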
#define VIRTIO_SUBCODE_64 0x0D00

static void do_ext_interrupt(CPUState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;
    target_phys_addr_t len = TARGET_PAGE_SIZE;
    ExtQueue *q;

    if (!(env->psw.mask & PSW_MASK_EXT)) {
        cpu_abort(env, "Ext int w/o ext mask\n");
    }

    if (env->ext_index < 0 || env->ext_index > MAX_EXT_QUEUE) {
        cpu_abort(env, "Ext queue overrun: %d\n", env->ext_index);
    }

    q = &env->ext_queue[env->ext_index];
    lowcore = cpu_physical_memory_map(env->psa, &len, 1);

    lowcore->ext_int_code = cpu_to_be16(q->code);
    lowcore->ext_params = cpu_to_be32(q->param);
    lowcore->ext_params2 = cpu_to_be64(q->param64);
    lowcore->external_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr);
    lowcore->cpu_addr = cpu_to_be16(env->cpu_num | VIRTIO_SUBCODE_64);
    mask = be64_to_cpu(lowcore->external_new_psw.mask);
    addr = be64_to_cpu(lowcore->external_new_psw.addr);

    cpu_physical_memory_unmap(lowcore, len, 1, len);

    env->ext_index--;
    if (env->ext_index == -1) {
        env->pending_int &= ~INTERRUPT_EXT;
    }

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __FUNCTION__,
            env->psw.mask, env->psw.addr);

    load_psw(env, mask, addr);
}

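/*
 * Main interrupt dispatch: pending TOD and CPU-timer interrupts are first
 * converted into queued external interrupts (codes 0x1004 and 0x1005), then
 * the exception at hand is delivered through the handlers above.  Once no
 * interrupt remains pending, the hard interrupt request is withdrawn.
 */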
void do_interrupt (CPUState *env)
{
    qemu_log("%s: %d at pc=%" PRIx64 "\n", __FUNCTION__, env->exception_index,
             env->psw.addr);

    /* handle external interrupts */
    if ((env->psw.mask & PSW_MASK_EXT) &&
        env->exception_index == -1) {
        if (env->pending_int & INTERRUPT_EXT) {
            /* code is already in env */
            env->exception_index = EXCP_EXT;
        } else if (env->pending_int & INTERRUPT_TOD) {
            cpu_inject_ext(env, 0x1004, 0, 0);
            env->exception_index = EXCP_EXT;
            env->pending_int &= ~INTERRUPT_EXT;
            env->pending_int &= ~INTERRUPT_TOD;
        } else if (env->pending_int & INTERRUPT_CPUTIMER) {
            cpu_inject_ext(env, 0x1005, 0, 0);
            env->exception_index = EXCP_EXT;
            env->pending_int &= ~INTERRUPT_EXT;
            env->pending_int &= ~INTERRUPT_CPUTIMER;
        }
    }

    switch (env->exception_index) {
    case EXCP_PGM:
        do_program_interrupt(env);
        break;
    case EXCP_SVC:
        do_svc_interrupt(env);
        break;
    case EXCP_EXT:
        do_ext_interrupt(env);
        break;
    }
    env->exception_index = -1;

    if (!env->pending_int) {
        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
    }
}

#endif /* CONFIG_USER_ONLY */