hw/ppc/xics.c @ e03c902c
/*
 * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator
 *
 * PAPR Virtualized Interrupt System, aka ICS/ICP aka xics
 *
 * Copyright (c) 2010,2011 David Gibson, IBM Corporation.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 */

#include "hw/hw.h"
#include "trace.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/xics.h"

/*
 * ICP: Presentation layer
 */
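
/*
 * Per-server (per-vCPU) presentation state.  xirr packs the Current
 * Processor Priority Register (CPPR) into its top byte and the XISR
 * (the source number currently being presented) into its low 24 bits;
 * see the XISR/CPPR masks and accessors below.  mfrr is the "Most
 * Favored Request Register" used to trigger inter-processor
 * interrupts.  Numerically smaller priority values are more favored,
 * and 0xff means "least favored"/none.
 */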
struct icp_server_state {
    uint32_t xirr;
    uint8_t pending_priority;
    uint8_t mfrr;
    qemu_irq output;
};

#define XISR_MASK  0x00ffffff
#define CPPR_MASK  0xff000000

#define XISR(ss)   (((ss)->xirr) & XISR_MASK)
#define CPPR(ss)   (((ss)->xirr) >> 24)

struct ics_state;

struct icp_state {
    long nr_servers;
    struct icp_server_state *ss;
    struct ics_state *ics;
};

static void ics_reject(struct ics_state *ics, int nr);
static void ics_resend(struct ics_state *ics);
static void ics_eoi(struct ics_state *ics, int nr);
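
/*
 * Present the IPI source (XICS_IPI) to a server if its MFRR is now
 * more favored than whatever is currently pending; any interrupt
 * already in the XISR is bounced back to the source controller.
 * icp_resend() below additionally asks the ICS to re-drive anything
 * it had to reject earlier.
 */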
static void icp_check_ipi(struct icp_state *icp, int server)
{
    struct icp_server_state *ss = icp->ss + server;

    if (XISR(ss) && (ss->pending_priority <= ss->mfrr)) {
        return;
    }

    trace_xics_icp_check_ipi(server, ss->mfrr);

    if (XISR(ss)) {
        ics_reject(icp->ics, XISR(ss));
    }

    ss->xirr = (ss->xirr & ~XISR_MASK) | XICS_IPI;
    ss->pending_priority = ss->mfrr;
    qemu_irq_raise(ss->output);
}

static void icp_resend(struct icp_state *icp, int server)
{
    struct icp_server_state *ss = icp->ss + server;

    if (ss->mfrr < CPPR(ss)) {
        icp_check_ipi(icp, server);
    }
    ics_resend(icp->ics);
}
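
/*
 * Update the CPPR (smaller value == more favored == fewer interrupts
 * accepted).  Making the CPPR more favored may force an already
 * pending interrupt to be rejected back to its source; making it
 * less favored gives the sources a chance to resend anything that
 * was previously refused.
 */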
static void icp_set_cppr(struct icp_state *icp, int server, uint8_t cppr)
{
    struct icp_server_state *ss = icp->ss + server;
    uint8_t old_cppr;
    uint32_t old_xisr;

    old_cppr = CPPR(ss);
    ss->xirr = (ss->xirr & ~CPPR_MASK) | (cppr << 24);

    if (cppr < old_cppr) {
        if (XISR(ss) && (cppr <= ss->pending_priority)) {
            old_xisr = XISR(ss);
            ss->xirr &= ~XISR_MASK; /* Clear XISR */
            ss->pending_priority = 0xff;
            qemu_irq_lower(ss->output);
            ics_reject(icp->ics, old_xisr);
        }
    } else {
        if (!XISR(ss)) {
            icp_resend(icp, server);
        }
    }
}
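
/*
 * A write to the MFRR below the current CPPR triggers an IPI check
 * (icp_check_ipi) for this server; 0xff is the reset/idle value
 * (no IPI requested).
 */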
static void icp_set_mfrr(struct icp_state *icp, int server, uint8_t mfrr)
{
    struct icp_server_state *ss = icp->ss + server;

    ss->mfrr = mfrr;
    if (mfrr < CPPR(ss)) {
        icp_check_ipi(icp, server);
    }
}
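
/*
 * Guest read of the XIRR (interrupt acknowledge): return the current
 * XIRR, promote the pending priority into the CPPR, clear the XISR
 * and drop the output line.
 */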
static uint32_t icp_accept(struct icp_server_state *ss)
{
    uint32_t xirr = ss->xirr;

    qemu_irq_lower(ss->output);
    ss->xirr = ss->pending_priority << 24;
    ss->pending_priority = 0xff;

    trace_xics_icp_accept(xirr, ss->xirr);

    return xirr;
}
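
/*
 * Guest write of the XIRR (end of interrupt): restore the CPPR from
 * the value written, forward the EOI to the source controller, and
 * if nothing else is being presented ask the sources to resend.
 */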
static void icp_eoi(struct icp_state *icp, int server, uint32_t xirr)
{
    struct icp_server_state *ss = icp->ss + server;

    /* Send EOI -> ICS */
    ss->xirr = (ss->xirr & ~CPPR_MASK) | (xirr & CPPR_MASK);
    trace_xics_icp_eoi(server, xirr, ss->xirr);
    ics_eoi(icp->ics, xirr & XISR_MASK);
    if (!XISR(ss)) {
        icp_resend(icp, server);
    }
}
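
/*
 * Present source "nr" at "priority" to a server.  If the CPPR or an
 * interrupt already being presented is at least as favored, the new
 * interrupt is rejected back to the ICS instead; otherwise it may
 * displace (reject) the one currently in the XISR.
 */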
static void icp_irq(struct icp_state *icp, int server, int nr, uint8_t priority)
{
    struct icp_server_state *ss = icp->ss + server;

    trace_xics_icp_irq(server, nr, priority);

    if ((priority >= CPPR(ss))
        || (XISR(ss) && (ss->pending_priority <= priority))) {
        ics_reject(icp->ics, nr);
    } else {
        if (XISR(ss)) {
            ics_reject(icp->ics, XISR(ss));
        }
        ss->xirr = (ss->xirr & ~XISR_MASK) | (nr & XISR_MASK);
        ss->pending_priority = priority;
        trace_xics_icp_raise(ss->xirr, ss->pending_priority);
        qemu_irq_raise(ss->output);
    }
}

/*
 * ICS: Source layer
 */
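
/*
 * Per-source state.  The ASSERTED/SENT flags track level-triggered
 * (LSI) sources, while REJECTED/MASKED_PENDING track message
 * (MSI-style, edge) sources.  A priority of 0xff means the source is
 * masked; saved_priority remembers the value to restore on
 * ibm,int-on.
 */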
struct ics_irq_state {
    int server;
    uint8_t priority;
    uint8_t saved_priority;
#define XICS_STATUS_ASSERTED           0x1
#define XICS_STATUS_SENT               0x2
#define XICS_STATUS_REJECTED           0x4
#define XICS_STATUS_MASKED_PENDING     0x8
    uint8_t status;
};

struct ics_state {
    int nr_irqs;
    int offset;
    qemu_irq *qirqs;
    bool *islsi;
    struct ics_irq_state *irqs;
    struct icp_state *icp;
};

static int ics_valid_irq(struct ics_state *ics, uint32_t nr)
{
    return (nr >= ics->offset)
        && (nr < (ics->offset + ics->nr_irqs));
}
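
/*
 * Resend helpers, used after a rejection or when a server becomes
 * able to take interrupts again: an MSI is re-presented only if it
 * was previously rejected, an LSI whenever it is still asserted and
 * not already sent.
 */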
static void resend_msi(struct ics_state *ics, int srcno)
{
    struct ics_irq_state *irq = ics->irqs + srcno;

    /* FIXME: filter by server#? */
    if (irq->status & XICS_STATUS_REJECTED) {
        irq->status &= ~XICS_STATUS_REJECTED;
        if (irq->priority != 0xff) {
            icp_irq(ics->icp, irq->server, srcno + ics->offset,
                    irq->priority);
        }
    }
}

static void resend_lsi(struct ics_state *ics, int srcno)
{
    struct ics_irq_state *irq = ics->irqs + srcno;

    if ((irq->priority != 0xff)
        && (irq->status & XICS_STATUS_ASSERTED)
        && !(irq->status & XICS_STATUS_SENT)) {
        irq->status |= XICS_STATUS_SENT;
        icp_irq(ics->icp, irq->server, srcno + ics->offset, irq->priority);
    }
}
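
/*
 * qemu_irq input handlers for the sources (dispatched from
 * ics_set_irq below).  An MSI edge is delivered immediately, or
 * latched as MASKED_PENDING while its priority is 0xff; an LSI just
 * tracks the line level and lets resend_lsi() do the delivery.
 */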
static void set_irq_msi(struct ics_state *ics, int srcno, int val)
{
    struct ics_irq_state *irq = ics->irqs + srcno;

    trace_xics_set_irq_msi(srcno, srcno + ics->offset);

    if (val) {
        if (irq->priority == 0xff) {
            irq->status |= XICS_STATUS_MASKED_PENDING;
            trace_xics_masked_pending();
        } else {
            icp_irq(ics->icp, irq->server, srcno + ics->offset, irq->priority);
        }
    }
}

static void set_irq_lsi(struct ics_state *ics, int srcno, int val)
{
    struct ics_irq_state *irq = ics->irqs + srcno;

    trace_xics_set_irq_lsi(srcno, srcno + ics->offset);
    if (val) {
        irq->status |= XICS_STATUS_ASSERTED;
    } else {
        irq->status &= ~XICS_STATUS_ASSERTED;
    }
    resend_lsi(ics, srcno);
}

static void ics_set_irq(void *opaque, int srcno, int val)
{
    struct ics_state *ics = (struct ics_state *)opaque;

    if (ics->islsi[srcno]) {
        set_irq_lsi(ics, srcno, val);
    } else {
        set_irq_msi(ics, srcno, val);
    }
}
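
/*
 * Called when the guest reprograms a source's XIVE (server/priority),
 * e.g. via the RTAS calls below: deliver anything that was held back
 * while the source was masked, or re-drive an asserted LSI.
 */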
static void write_xive_msi(struct ics_state *ics, int srcno)
{
    struct ics_irq_state *irq = ics->irqs + srcno;

    if (!(irq->status & XICS_STATUS_MASKED_PENDING)
        || (irq->priority == 0xff)) {
        return;
    }

    irq->status &= ~XICS_STATUS_MASKED_PENDING;
    icp_irq(ics->icp, irq->server, srcno + ics->offset, irq->priority);
}

static void write_xive_lsi(struct ics_state *ics, int srcno)
{
    resend_lsi(ics, srcno);
}

static void ics_write_xive(struct ics_state *ics, int nr, int server,
                           uint8_t priority, uint8_t saved_priority)
{
    int srcno = nr - ics->offset;
    struct ics_irq_state *irq = ics->irqs + srcno;

    irq->server = server;
    irq->priority = priority;
    irq->saved_priority = saved_priority;

    trace_xics_ics_write_xive(nr, srcno, server, priority);

    if (ics->islsi[srcno]) {
        write_xive_lsi(ics, srcno);
    } else {
        write_xive_msi(ics, srcno);
    }
}
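
/* ICS half of the reject/resend/EOI protocol driven by the ICP above. */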
static void ics_reject(struct ics_state *ics, int nr)
{
    struct ics_irq_state *irq = ics->irqs + nr - ics->offset;

    trace_xics_ics_reject(nr, nr - ics->offset);
    irq->status |= XICS_STATUS_REJECTED; /* Irrelevant but harmless for LSI */
    irq->status &= ~XICS_STATUS_SENT; /* Irrelevant but harmless for MSI */
}

static void ics_resend(struct ics_state *ics)
{
    int i;

    for (i = 0; i < ics->nr_irqs; i++) {
        /* FIXME: filter by server#? */
        if (ics->islsi[i]) {
            resend_lsi(ics, i);
        } else {
            resend_msi(ics, i);
        }
    }
}

static void ics_eoi(struct ics_state *ics, int nr)
{
    int srcno = nr - ics->offset;
    struct ics_irq_state *irq = ics->irqs + srcno;

    trace_xics_ics_eoi(nr);

    if (ics->islsi[srcno]) {
        irq->status &= ~XICS_STATUS_SENT;
    }
}

/*
 * Exported functions
 */
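
/*
 * Look up the qemu_irq a device model should be wired to for a given
 * global interrupt number, and declare whether that source is level
 * (LSI) or message (MSI) triggered.
 */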
qemu_irq xics_get_qirq(struct icp_state *icp, int irq)
{
    if (!ics_valid_irq(icp->ics, irq)) {
        return NULL;
    }

    return icp->ics->qirqs[irq - icp->ics->offset];
}

void xics_set_irq_type(struct icp_state *icp, int irq, bool lsi)
{
    assert(ics_valid_irq(icp->ics, irq));

    icp->ics->islsi[irq - icp->ics->offset] = lsi;
}
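
/*
 * PAPR hypercall entry points (H_CPPR, H_IPI, H_XIRR, H_EOI).  For
 * H_CPPR/H_XIRR/H_EOI the calling vCPU's cpu_index selects the ICP
 * server; H_IPI takes an explicit target server number.
 */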
static target_ulong h_cppr(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                           target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);
    target_ulong cppr = args[0];

    icp_set_cppr(spapr->icp, cs->cpu_index, cppr);
    return H_SUCCESS;
}

static target_ulong h_ipi(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                          target_ulong opcode, target_ulong *args)
{
    target_ulong server = args[0];
    target_ulong mfrr = args[1];

    if (server >= spapr->icp->nr_servers) {
        return H_PARAMETER;
    }

    icp_set_mfrr(spapr->icp, server, mfrr);
    return H_SUCCESS;
}

static target_ulong h_xirr(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                           target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);
    uint32_t xirr = icp_accept(spapr->icp->ss + cs->cpu_index);

    args[0] = xirr;
    return H_SUCCESS;
}

static target_ulong h_eoi(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                          target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);
    target_ulong xirr = args[0];

    icp_eoi(spapr->icp, cs->cpu_index, xirr);
    return H_SUCCESS;
}
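
/*
 * RTAS calls for configuring the sources: ibm,set-xive and
 * ibm,get-xive program/read a source's server and priority,
 * ibm,int-off masks a source (saving its priority) and ibm,int-on
 * restores it.  A return status of -3 is the RTAS "parameter error"
 * code.
 */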
static void rtas_set_xive(sPAPREnvironment *spapr, uint32_t token,
                          uint32_t nargs, target_ulong args,
                          uint32_t nret, target_ulong rets)
{
    struct ics_state *ics = spapr->icp->ics;
    uint32_t nr, server, priority;

    if ((nargs != 3) || (nret != 1)) {
        rtas_st(rets, 0, -3);
        return;
    }

    nr = rtas_ld(args, 0);
    server = rtas_ld(args, 1);
    priority = rtas_ld(args, 2);

    if (!ics_valid_irq(ics, nr) || (server >= ics->icp->nr_servers)
        || (priority > 0xff)) {
        rtas_st(rets, 0, -3);
        return;
    }

    ics_write_xive(ics, nr, server, priority, priority);

    rtas_st(rets, 0, 0); /* Success */
}

static void rtas_get_xive(sPAPREnvironment *spapr, uint32_t token,
                          uint32_t nargs, target_ulong args,
                          uint32_t nret, target_ulong rets)
{
    struct ics_state *ics = spapr->icp->ics;
    uint32_t nr;

    if ((nargs != 1) || (nret != 3)) {
        rtas_st(rets, 0, -3);
        return;
    }

    nr = rtas_ld(args, 0);

    if (!ics_valid_irq(ics, nr)) {
        rtas_st(rets, 0, -3);
        return;
    }

    rtas_st(rets, 0, 0); /* Success */
    rtas_st(rets, 1, ics->irqs[nr - ics->offset].server);
    rtas_st(rets, 2, ics->irqs[nr - ics->offset].priority);
}

static void rtas_int_off(sPAPREnvironment *spapr, uint32_t token,
                         uint32_t nargs, target_ulong args,
                         uint32_t nret, target_ulong rets)
{
    struct ics_state *ics = spapr->icp->ics;
    uint32_t nr;

    if ((nargs != 1) || (nret != 1)) {
        rtas_st(rets, 0, -3);
        return;
    }

    nr = rtas_ld(args, 0);

    if (!ics_valid_irq(ics, nr)) {
        rtas_st(rets, 0, -3);
        return;
    }

    ics_write_xive(ics, nr, ics->irqs[nr - ics->offset].server, 0xff,
                   ics->irqs[nr - ics->offset].priority);

    rtas_st(rets, 0, 0); /* Success */
}

static void rtas_int_on(sPAPREnvironment *spapr, uint32_t token,
                        uint32_t nargs, target_ulong args,
                        uint32_t nret, target_ulong rets)
{
    struct ics_state *ics = spapr->icp->ics;
    uint32_t nr;

    if ((nargs != 1) || (nret != 1)) {
        rtas_st(rets, 0, -3);
        return;
    }

    nr = rtas_ld(args, 0);

    if (!ics_valid_irq(ics, nr)) {
        rtas_st(rets, 0, -3);
        return;
    }

    ics_write_xive(ics, nr, ics->irqs[nr - ics->offset].server,
                   ics->irqs[nr - ics->offset].saved_priority,
                   ics->irqs[nr - ics->offset].saved_priority);

    rtas_st(rets, 0, 0); /* Success */
}
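
/*
 * System reset: clear the presentation state (xirr cleared, MFRR and
 * pending priority back to 0xff) and mask every source (priority and
 * saved priority 0xff).
 */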
static void xics_reset(void *opaque)
{
    struct icp_state *icp = (struct icp_state *)opaque;
    struct ics_state *ics = icp->ics;
    int i;

    for (i = 0; i < icp->nr_servers; i++) {
        icp->ss[i].xirr = 0;
        icp->ss[i].pending_priority = 0xff;
        icp->ss[i].mfrr = 0xff;
        /* Make sure all outputs are deasserted */
        qemu_set_irq(icp->ss[i].output, 0);
    }

    memset(ics->irqs, 0, sizeof(struct ics_irq_state) * ics->nr_irqs);
    for (i = 0; i < ics->nr_irqs; i++) {
        ics->irqs[i].priority = 0xff;
        ics->irqs[i].saved_priority = 0xff;
    }
}
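
/*
 * Hook a vCPU's external interrupt input up to its ICP output line;
 * only the POWER7 and 970 interrupt input models are supported.
 */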
void xics_cpu_setup(struct icp_state *icp, PowerPCCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    struct icp_server_state *ss = &icp->ss[cs->cpu_index];

    assert(cs->cpu_index < icp->nr_servers);

    switch (PPC_INPUT(env)) {
    case PPC_FLAGS_INPUT_POWER7:
        ss->output = env->irq_inputs[POWER7_INPUT_INT];
        break;

    case PPC_FLAGS_INPUT_970:
        ss->output = env->irq_inputs[PPC970_INPUT_INT];
        break;

    default:
        fprintf(stderr, "XICS interrupt controller does not support this CPU "
                "bus model\n");
        abort();
    }
}
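
/*
 * Build one ICP (nr_servers presentation slots) and one ICS (nr_irqs
 * sources starting at XICS_IRQ_BASE), link them together and register
 * the hypercalls and RTAS calls above.  A sketch of how machine code
 * is expected to use this (variable names here are illustrative only):
 *
 *   icp = xics_system_init(nr_cpus, nr_irqs);
 *   xics_cpu_setup(icp, cpu);            // once per vCPU
 *   qirq = xics_get_qirq(icp, irq_num);  // wire a device to a source
 */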
struct icp_state *xics_system_init(int nr_servers, int nr_irqs)
{
    struct icp_state *icp;
    struct ics_state *ics;

    icp = g_malloc0(sizeof(*icp));
    icp->nr_servers = nr_servers;
    icp->ss = g_malloc0(icp->nr_servers * sizeof(struct icp_server_state));

    ics = g_malloc0(sizeof(*ics));
    ics->nr_irqs = nr_irqs;
    ics->offset = XICS_IRQ_BASE;
    ics->irqs = g_malloc0(nr_irqs * sizeof(struct ics_irq_state));
    ics->islsi = g_malloc0(nr_irqs * sizeof(bool));

    icp->ics = ics;
    ics->icp = icp;

    ics->qirqs = qemu_allocate_irqs(ics_set_irq, ics, nr_irqs);

    spapr_register_hypercall(H_CPPR, h_cppr);
    spapr_register_hypercall(H_IPI, h_ipi);
    spapr_register_hypercall(H_XIRR, h_xirr);
    spapr_register_hypercall(H_EOI, h_eoi);

    spapr_rtas_register("ibm,set-xive", rtas_set_xive);
    spapr_rtas_register("ibm,get-xive", rtas_get_xive);
    spapr_rtas_register("ibm,int-off", rtas_int_off);
    spapr_rtas_register("ibm,int-on", rtas_int_on);

    qemu_register_reset(xics_reset, icp);

    return icp;
}