root / target-s390x / helper.c @ a8170e5e

/*
 *  S/390 helpers
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2011 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "gdbstub.h"
#include "qemu-timer.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu.h"
#endif

//#define DEBUG_S390
//#define DEBUG_S390_PTE
//#define DEBUG_S390_STDOUT

#ifdef DEBUG_S390
#ifdef DEBUG_S390_STDOUT
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); \
         qemu_log(fmt, ##__VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { qemu_log(fmt, ## __VA_ARGS__); } while (0)
#endif
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#ifdef DEBUG_S390_PTE
#define PTE_DPRINTF DPRINTF
#else
#define PTE_DPRINTF(fmt, ...) \
    do { } while (0)
#endif

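/*
 * Timer callbacks (system emulation only): these run from QEMU's timer
 * subsystem.  They only latch a pending-interrupt bit and raise
 * CPU_INTERRUPT_HARD; the actual delivery happens later in do_interrupt().
 */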
#ifndef CONFIG_USER_ONLY
void s390x_tod_timer(void *opaque)
{
    S390CPU *cpu = opaque;
    CPUS390XState *env = &cpu->env;

    env->pending_int |= INTERRUPT_TOD;
    cpu_interrupt(env, CPU_INTERRUPT_HARD);
}

void s390x_cpu_timer(void *opaque)
{
    S390CPU *cpu = opaque;
    CPUS390XState *env = &cpu->env;

    env->pending_int |= INTERRUPT_CPUTIMER;
    cpu_interrupt(env, CPU_INTERRUPT_HARD);
}
#endif

S390CPU *cpu_s390x_init(const char *cpu_model)
{
    S390CPU *cpu;
    CPUS390XState *env;
    static int inited;

    cpu = S390_CPU(object_new(TYPE_S390_CPU));
    env = &cpu->env;

    if (tcg_enabled() && !inited) {
        inited = 1;
        s390x_translate_init();
    }

    env->cpu_model_str = cpu_model;
    qemu_init_vcpu(env);
    return cpu;
}

#if defined(CONFIG_USER_ONLY)

void do_interrupt(CPUS390XState *env)
{
    env->exception_index = -1;
}

int cpu_s390x_handle_mmu_fault(CPUS390XState *env, target_ulong address,
                               int rw, int mmu_idx)
{
    /* fprintf(stderr, "%s: address 0x%lx rw %d mmu_idx %d\n",
       __func__, address, rw, mmu_idx); */
    env->exception_index = EXCP_ADDR;
    /* FIXME: find out how this works on a real machine */
    env->__excp_addr = address;
    return 1;
}

#else /* !CONFIG_USER_ONLY */

/* Ensure to exit the TB after this call! */
static void trigger_pgm_exception(CPUS390XState *env, uint32_t code,
                                  uint32_t ilc)
{
    env->exception_index = EXCP_PGM;
    env->int_pgm_code = code;
    env->int_pgm_ilc = ilc;
}

static int trans_bits(CPUS390XState *env, uint64_t mode)
{
    int bits = 0;

    switch (mode) {
    case PSW_ASC_PRIMARY:
        bits = 1;
        break;
    case PSW_ASC_SECONDARY:
        bits = 2;
        break;
    case PSW_ASC_HOME:
        bits = 3;
        break;
    default:
        cpu_abort(env, "unknown asc mode\n");
        break;
    }

    return bits;
}

static void trigger_prot_fault(CPUS390XState *env, target_ulong vaddr,
                               uint64_t mode)
{
    int ilc = ILC_LATER_INC_2;
    int bits = trans_bits(env, mode) | 4;

    DPRINTF("%s: vaddr=%016" PRIx64 " bits=%d\n", __func__, vaddr, bits);

    stq_phys(env->psa + offsetof(LowCore, trans_exc_code), vaddr | bits);
    trigger_pgm_exception(env, PGM_PROTECTION, ilc);
}

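/*
 * Like trigger_prot_fault() above, this stores a translation-exception
 * identification in the lowcore: the faulting virtual address ORed with
 * the trans_bits() value identifying the address space, then raises the
 * requested program interruption.
 */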
static void trigger_page_fault(CPUS390XState *env, target_ulong vaddr,
                               uint32_t type, uint64_t asc, int rw)
{
    int ilc = ILC_LATER;
    int bits = trans_bits(env, asc);

    if (rw == 2) {
        /* code accesses have an undefined ilc */
        ilc = 2;
    }

    DPRINTF("%s: vaddr=%016" PRIx64 " bits=%d\n", __func__, vaddr, bits);
167

    
168
    stq_phys(env->psa + offsetof(LowCore, trans_exc_code), vaddr | bits);
169
    trigger_pgm_exception(env, type, ilc);
170
}
171

    
172
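/*
 * One step of the DAT table walk.  The walk starts one level above the
 * ASCE's designation type (see the "fake level" in mmu_translate_asc())
 * and recurses with level - 4 down to the segment level, where the
 * fetched entry is treated as the page-table entry that yields the real
 * address and the read-only flag.
 */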
static int mmu_translate_asce(CPUS390XState *env, target_ulong vaddr,
                              uint64_t asc, uint64_t asce, int level,
                              target_ulong *raddr, int *flags, int rw)
{
    uint64_t offs = 0;
    uint64_t origin;
    uint64_t new_asce;

    PTE_DPRINTF("%s: 0x%" PRIx64 "\n", __func__, asce);

    if (((level != _ASCE_TYPE_SEGMENT) && (asce & _REGION_ENTRY_INV)) ||
        ((level == _ASCE_TYPE_SEGMENT) && (asce & _SEGMENT_ENTRY_INV))) {
        /* XXX different regions have different faults */
        DPRINTF("%s: invalid region\n", __func__);
        trigger_page_fault(env, vaddr, PGM_SEGMENT_TRANS, asc, rw);
        return -1;
    }

    if ((level <= _ASCE_TYPE_MASK) && ((asce & _ASCE_TYPE_MASK) != level)) {
        trigger_page_fault(env, vaddr, PGM_TRANS_SPEC, asc, rw);
        return -1;
    }

    if (asce & _ASCE_REAL_SPACE) {
        /* direct mapping */

        *raddr = vaddr;
        return 0;
    }

    origin = asce & _ASCE_ORIGIN;

    switch (level) {
    case _ASCE_TYPE_REGION1 + 4:
        offs = (vaddr >> 50) & 0x3ff8;
        break;
    case _ASCE_TYPE_REGION1:
        offs = (vaddr >> 39) & 0x3ff8;
        break;
    case _ASCE_TYPE_REGION2:
        offs = (vaddr >> 28) & 0x3ff8;
        break;
    case _ASCE_TYPE_REGION3:
        offs = (vaddr >> 17) & 0x3ff8;
        break;
    case _ASCE_TYPE_SEGMENT:
        offs = (vaddr >> 9) & 0x07f8;
        origin = asce & _SEGMENT_ENTRY_ORIGIN;
        break;
    }

    /* XXX region protection flags */
    /* *flags &= ~PAGE_WRITE */

    new_asce = ldq_phys(origin + offs);
    PTE_DPRINTF("%s: 0x%" PRIx64 " + 0x%" PRIx64 " => 0x%016" PRIx64 "\n",
                __func__, origin, offs, new_asce);

    if (level != _ASCE_TYPE_SEGMENT) {
        /* yet another region */
        return mmu_translate_asce(env, vaddr, asc, new_asce, level - 4, raddr,
                                  flags, rw);
    }

    /* PTE */
    if (new_asce & _PAGE_INVALID) {
        DPRINTF("%s: PTE=0x%" PRIx64 " invalid\n", __func__, new_asce);
        trigger_page_fault(env, vaddr, PGM_PAGE_TRANS, asc, rw);
        return -1;
    }

    if (new_asce & _PAGE_RO) {
        *flags &= ~PAGE_WRITE;
    }

    *raddr = new_asce & _ASCE_ORIGIN;

    PTE_DPRINTF("%s: PTE=0x%" PRIx64 "\n", __func__, new_asce);

    return 0;
}

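/*
 * Pick the address-space-control element for the requested address space
 * (CR1 = primary, CR7 = secondary, CR13 = home), check that the address
 * fits the table type, then start the table walk one level above the
 * ASCE's own designation type so that mmu_translate_asce() can fetch the
 * top-level table entry itself.
 */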
static int mmu_translate_asc(CPUS390XState *env, target_ulong vaddr,
                             uint64_t asc, target_ulong *raddr, int *flags,
                             int rw)
{
    uint64_t asce = 0;
    int level, new_level;
    int r;

    switch (asc) {
    case PSW_ASC_PRIMARY:
        PTE_DPRINTF("%s: asc=primary\n", __func__);
        asce = env->cregs[1];
        break;
    case PSW_ASC_SECONDARY:
        PTE_DPRINTF("%s: asc=secondary\n", __func__);
        asce = env->cregs[7];
        break;
    case PSW_ASC_HOME:
        PTE_DPRINTF("%s: asc=home\n", __func__);
        asce = env->cregs[13];
        break;
    }

    switch (asce & _ASCE_TYPE_MASK) {
    case _ASCE_TYPE_REGION1:
        break;
    case _ASCE_TYPE_REGION2:
        if (vaddr & 0xffe0000000000000ULL) {
            DPRINTF("%s: vaddr doesn't fit 0x%16" PRIx64
                    " 0xffe0000000000000ULL\n", __func__, vaddr);
            trigger_page_fault(env, vaddr, PGM_TRANS_SPEC, asc, rw);
            return -1;
        }
        break;
    case _ASCE_TYPE_REGION3:
        if (vaddr & 0xfffffc0000000000ULL) {
            DPRINTF("%s: vaddr doesn't fit 0x%16" PRIx64
                    " 0xfffffc0000000000ULL\n", __func__, vaddr);
            trigger_page_fault(env, vaddr, PGM_TRANS_SPEC, asc, rw);
            return -1;
        }
        break;
    case _ASCE_TYPE_SEGMENT:
        if (vaddr & 0xffffffff80000000ULL) {
            DPRINTF("%s: vaddr doesn't fit 0x%16" PRIx64
                    " 0xffffffff80000000ULL\n", __func__, vaddr);
            trigger_page_fault(env, vaddr, PGM_TRANS_SPEC, asc, rw);
            return -1;
        }
        break;
    }

    /* fake level above current */
    level = asce & _ASCE_TYPE_MASK;
    new_level = level + 4;
    asce = (asce & ~_ASCE_TYPE_MASK) | (new_level & _ASCE_TYPE_MASK);

    r = mmu_translate_asce(env, vaddr, asc, asce, new_level, raddr, flags, rw);

    if ((rw == 1) && !(*flags & PAGE_WRITE)) {
        trigger_prot_fault(env, vaddr, asc);
        return -1;
    }

    return r;
}

int mmu_translate(CPUS390XState *env, target_ulong vaddr, int rw, uint64_t asc,
                  target_ulong *raddr, int *flags)
{
    int r = -1;
    uint8_t *sk;

    *flags = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    vaddr &= TARGET_PAGE_MASK;

    if (!(env->psw.mask & PSW_MASK_DAT)) {
        *raddr = vaddr;
        r = 0;
        goto out;
    }

    switch (asc) {
    case PSW_ASC_PRIMARY:
    case PSW_ASC_HOME:
        r = mmu_translate_asc(env, vaddr, asc, raddr, flags, rw);
        break;
    case PSW_ASC_SECONDARY:
        /*
         * Instruction: Primary
         * Data: Secondary
         */
        if (rw == 2) {
            r = mmu_translate_asc(env, vaddr, PSW_ASC_PRIMARY, raddr, flags,
                                  rw);
            *flags &= ~(PAGE_READ | PAGE_WRITE);
        } else {
            r = mmu_translate_asc(env, vaddr, PSW_ASC_SECONDARY, raddr, flags,
                                  rw);
            *flags &= ~(PAGE_EXEC);
        }
        break;
    case PSW_ASC_ACCREG:
    default:
        hw_error("guest switched to unknown asc mode\n");
        break;
    }

 out:
    /* Convert real address -> absolute address */
    if (*raddr < 0x2000) {
        *raddr = *raddr + env->psa;
    }

    if (*raddr <= ram_size) {
        sk = &env->storage_keys[*raddr / TARGET_PAGE_SIZE];
        if (*flags & PAGE_READ) {
            *sk |= SK_R;
        }

        if (*flags & PAGE_WRITE) {
            *sk |= SK_C;
        }
    }

    return r;
}

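/*
 * TLB fill entry point for the softmmu: rw is 0 for a data read, 1 for a
 * data write and 2 for an instruction fetch, matching the convention used
 * by mmu_translate() above.
 */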
int cpu_s390x_handle_mmu_fault(CPUS390XState *env, target_ulong orig_vaddr,
                               int rw, int mmu_idx)
{
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;
    target_ulong vaddr, raddr;
    int prot;

    DPRINTF("%s: address 0x%" PRIx64 " rw %d mmu_idx %d\n",
390
            __func__, _vaddr, rw, mmu_idx);
391

    
    orig_vaddr &= TARGET_PAGE_MASK;
    vaddr = orig_vaddr;

    /* 31-Bit mode */
    if (!(env->psw.mask & PSW_MASK_64)) {
        vaddr &= 0x7fffffff;
    }

    if (mmu_translate(env, vaddr, rw, asc, &raddr, &prot)) {
        /* Translation ended in exception */
        return 1;
    }

    /* check out of RAM access */
    if (raddr > (ram_size + virtio_size)) {
        DPRINTF("%s: raddr %" PRIx64 " > ram_size %" PRIx64 "\n", __func__,
                (uint64_t)raddr, (uint64_t)ram_size);
        trigger_pgm_exception(env, PGM_ADDRESSING, ILC_LATER);
        return 1;
    }

    DPRINTF("%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n", __func__,
            (uint64_t)vaddr, (uint64_t)raddr, prot);

    tlb_set_page(env, orig_vaddr, raddr, prot,
                 mmu_idx, TARGET_PAGE_SIZE);

    return 0;
}

hwaddr cpu_get_phys_page_debug(CPUS390XState *env,
                               target_ulong vaddr)
{
    target_ulong raddr;
    int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    int old_exc = env->exception_index;
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;

    /* 31-Bit mode */
    if (!(env->psw.mask & PSW_MASK_64)) {
        vaddr &= 0x7fffffff;
    }

    mmu_translate(env, vaddr, 2, asc, &raddr, &prot);
    env->exception_index = old_exc;

    return raddr;
}

void load_psw(CPUS390XState *env, uint64_t mask, uint64_t addr)
{
    if (mask & PSW_MASK_WAIT) {
        if (!(mask & (PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK))) {
            if (s390_del_running_cpu(env) == 0) {
#ifndef CONFIG_USER_ONLY
                qemu_system_shutdown_request();
#endif
            }
        }
        env->halted = 1;
        env->exception_index = EXCP_HLT;
    }

    env->psw.addr = addr;
    env->psw.mask = mask;
    env->cc_op = (mask >> 13) & 3;
}

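/*
 * The condition code is tracked lazily (cc_op/cc_src/cc_dst/cc_vr), so it
 * has to be materialized with calc_cc() before the PSW mask is stored to
 * the lowcore.
 */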
static uint64_t get_psw_mask(CPUS390XState *env)
{
    uint64_t r = env->psw.mask;

    env->cc_op = calc_cc(env, env->cc_op, env->cc_src, env->cc_dst, env->cc_vr);

    r &= ~(3ULL << 13);
    assert(!(env->cc_op & ~3));
    r |= env->cc_op << 13;

    return r;
}

static void do_svc_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;
    hwaddr len = TARGET_PAGE_SIZE;

    lowcore = cpu_physical_memory_map(env->psa, &len, 1);

    lowcore->svc_code = cpu_to_be16(env->int_svc_code);
    lowcore->svc_ilc = cpu_to_be16(env->int_svc_ilc);
    lowcore->svc_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->svc_old_psw.addr = cpu_to_be64(env->psw.addr + (env->int_svc_ilc));
    mask = be64_to_cpu(lowcore->svc_new_psw.mask);
    addr = be64_to_cpu(lowcore->svc_new_psw.addr);

    cpu_physical_memory_unmap(lowcore, len, 1, len);

    load_psw(env, mask, addr);
}

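/*
 * The ILC_LATER* values are placeholders (see trigger_pgm_exception()):
 * the real instruction-length code is derived here from the first opcode
 * byte via get_ilc(), optionally advancing the PSW past the instruction.
 */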
static void do_program_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;
    hwaddr len = TARGET_PAGE_SIZE;
    int ilc = env->int_pgm_ilc;

    switch (ilc) {
    case ILC_LATER:
        ilc = get_ilc(cpu_ldub_code(env, env->psw.addr));
        break;
    case ILC_LATER_INC:
        ilc = get_ilc(cpu_ldub_code(env, env->psw.addr));
        env->psw.addr += ilc * 2;
        break;
    case ILC_LATER_INC_2:
        ilc = get_ilc(cpu_ldub_code(env, env->psw.addr)) * 2;
        env->psw.addr += ilc;
        break;
    }

    qemu_log_mask(CPU_LOG_INT, "%s: code=0x%x ilc=%d\n",
                  __func__, env->int_pgm_code, ilc);

    lowcore = cpu_physical_memory_map(env->psa, &len, 1);

    lowcore->pgm_ilc = cpu_to_be16(ilc);
    lowcore->pgm_code = cpu_to_be16(env->int_pgm_code);
    lowcore->program_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->program_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->program_new_psw.mask);
    addr = be64_to_cpu(lowcore->program_new_psw.addr);

    cpu_physical_memory_unmap(lowcore, len, 1, len);

    DPRINTF("%s: %x %x %" PRIx64 " %" PRIx64 "\n", __func__,
            env->int_pgm_code, ilc, env->psw.mask,
            env->psw.addr);

    load_psw(env, mask, addr);
}

#define VIRTIO_SUBCODE_64 0x0D00

static void do_ext_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;
    hwaddr len = TARGET_PAGE_SIZE;
    ExtQueue *q;

    if (!(env->psw.mask & PSW_MASK_EXT)) {
        cpu_abort(env, "Ext int w/o ext mask\n");
    }

    if (env->ext_index < 0 || env->ext_index > MAX_EXT_QUEUE) {
        cpu_abort(env, "Ext queue overrun: %d\n", env->ext_index);
    }

    q = &env->ext_queue[env->ext_index];
    lowcore = cpu_physical_memory_map(env->psa, &len, 1);

    lowcore->ext_int_code = cpu_to_be16(q->code);
    lowcore->ext_params = cpu_to_be32(q->param);
    lowcore->ext_params2 = cpu_to_be64(q->param64);
    lowcore->external_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr);
    lowcore->cpu_addr = cpu_to_be16(env->cpu_num | VIRTIO_SUBCODE_64);
    mask = be64_to_cpu(lowcore->external_new_psw.mask);
    addr = be64_to_cpu(lowcore->external_new_psw.addr);

    cpu_physical_memory_unmap(lowcore, len, 1, len);

    env->ext_index--;
    if (env->ext_index == -1) {
        env->pending_int &= ~INTERRUPT_EXT;
    }

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
            env->psw.mask, env->psw.addr);

    load_psw(env, mask, addr);
}

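/*
 * Deliver a pending interrupt.  Pending TOD and CPU-timer interrupts are
 * converted into external interrupts here, using 0x1004 and 0x1005 as the
 * external-interruption codes (clock comparator and CPU timer,
 * respectively).
 */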
void do_interrupt(CPUS390XState *env)
{
    qemu_log_mask(CPU_LOG_INT, "%s: %d at pc=%" PRIx64 "\n",
                  __func__, env->exception_index, env->psw.addr);

    s390_add_running_cpu(env);
    /* handle external interrupts */
    if ((env->psw.mask & PSW_MASK_EXT) &&
        env->exception_index == -1) {
        if (env->pending_int & INTERRUPT_EXT) {
            /* code is already in env */
            env->exception_index = EXCP_EXT;
        } else if (env->pending_int & INTERRUPT_TOD) {
            cpu_inject_ext(env, 0x1004, 0, 0);
            env->exception_index = EXCP_EXT;
            env->pending_int &= ~INTERRUPT_EXT;
            env->pending_int &= ~INTERRUPT_TOD;
        } else if (env->pending_int & INTERRUPT_CPUTIMER) {
            cpu_inject_ext(env, 0x1005, 0, 0);
            env->exception_index = EXCP_EXT;
            env->pending_int &= ~INTERRUPT_EXT;
            env->pending_int &= ~INTERRUPT_CPUTIMER;
        }
    }

    switch (env->exception_index) {
    case EXCP_PGM:
        do_program_interrupt(env);
        break;
    case EXCP_SVC:
        do_svc_interrupt(env);
        break;
    case EXCP_EXT:
        do_ext_interrupt(env);
        break;
    }
    env->exception_index = -1;

    if (!env->pending_int) {
        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
    }
}

#endif /* CONFIG_USER_ONLY */