Statistics
| Branch: | Revision:

root / hw / ppc / xics.c @ 0d09e41a

History | View | Annotate | Download (14.9 kB)

1
/*
2
 * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator
3
 *
4
 * PAPR Virtualized Interrupt System, aka ICS/ICP aka xics
5
 *
6
 * Copyright (c) 2010,2011 David Gibson, IBM Corporation.
7
 *
8
 * Permission is hereby granted, free of charge, to any person obtaining a copy
9
 * of this software and associated documentation files (the "Software"), to deal
10
 * in the Software without restriction, including without limitation the rights
11
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12
 * copies of the Software, and to permit persons to whom the Software is
13
 * furnished to do so, subject to the following conditions:
14
 *
15
 * The above copyright notice and this permission notice shall be included in
16
 * all copies or substantial portions of the Software.
17
 *
18
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
24
 * THE SOFTWARE.
25
 *
26
 */
27

    
28
#include "hw/hw.h"
29
#include "trace.h"
30
#include "hw/ppc/spapr.h"
31
#include "hw/ppc/xics.h"
32

    
33
/*
34
 * ICP: Presentation layer
35
 */
36

    
37
/* Per-CPU ("server") presentation controller state. */
struct icp_server_state {
    uint32_t xirr;            /* CPPR in the top byte, XISR in the low 24 bits */
    uint8_t pending_priority; /* priority of the interrupt currently in XISR */
    uint8_t mfrr;             /* Most Favored Request Register (IPI priority;
                               * 0xff means no IPI requested) */
    qemu_irq output;          /* external interrupt line into the CPU */
};
43

    
44
/* Layout of the XIRR register: the XISR (interrupt source number) occupies
 * the low 24 bits, the CPPR (current processor priority) the top byte. */
#define XISR_MASK  0x00ffffff
#define CPPR_MASK  0xff000000

/* Field accessors for a struct icp_server_state pointer */
#define XISR(ss)   (((ss)->xirr) & XISR_MASK)
#define CPPR(ss)   (((ss)->xirr) >> 24)
49

    
50
struct ics_state;
51

    
52
/* Presentation layer as a whole: one server slot per (potential) CPU,
 * plus a link to the single source controller. */
struct icp_state {
    long nr_servers;             /* number of entries in ss[] */
    struct icp_server_state *ss; /* per-server state, indexed by cpu_index */
    struct ics_state *ics;       /* the source (ICS) layer */
};
57

    
58
static void ics_reject(struct ics_state *ics, int nr);
59
static void ics_resend(struct ics_state *ics);
60
static void ics_eoi(struct ics_state *ics, int nr);
61

    
62
/*
 * Present the inter-processor interrupt to @server if the MFRR is now
 * the most favored pending source (lower value == more favored).
 */
static void icp_check_ipi(struct icp_state *icp, int server)
{
    struct icp_server_state *ss = icp->ss + server;

    /* An already-pending interrupt that is at least as favored as the
     * IPI keeps precedence; nothing to do. */
    if (XISR(ss) && (ss->pending_priority <= ss->mfrr)) {
        return;
    }

    trace_xics_icp_check_ipi(server, ss->mfrr);

    /* The IPI displaces a less favored pending interrupt: hand that one
     * back to the source layer so it can be redelivered later. */
    if (XISR(ss)) {
        ics_reject(icp->ics, XISR(ss));
    }

    /* Load the IPI into XISR and raise the CPU's external interrupt line */
    ss->xirr = (ss->xirr & ~XISR_MASK) | XICS_IPI;
    ss->pending_priority = ss->mfrr;
    qemu_irq_raise(ss->output);
}
80

    
81
static void icp_resend(struct icp_state *icp, int server)
82
{
83
    struct icp_server_state *ss = icp->ss + server;
84

    
85
    if (ss->mfrr < CPPR(ss)) {
86
        icp_check_ipi(icp, server);
87
    }
88
    ics_resend(icp->ics);
89
}
90

    
91
/*
 * Set the Current Processor Priority Register for @server.
 *
 * Lower values are more favored: lowering CPPR makes the processor
 * *less* interruptible, so a pending interrupt that no longer
 * qualifies must be handed back to the source layer; raising it may
 * let a previously rejected interrupt through, so trigger a resend.
 */
static void icp_set_cppr(struct icp_state *icp, int server, uint8_t cppr)
{
    struct icp_server_state *ss = icp->ss + server;
    uint8_t old_cppr;
    uint32_t old_xisr;

    old_cppr = CPPR(ss);
    ss->xirr = (ss->xirr & ~CPPR_MASK) | (cppr << 24);

    if (cppr < old_cppr) {
        if (XISR(ss) && (cppr <= ss->pending_priority)) {
            old_xisr = XISR(ss);
            ss->xirr &= ~XISR_MASK; /* Clear XISR */
            /* Bug fix: with XISR cleared nothing is pending, so reset
             * pending_priority to the least favored value.  Leaving the
             * stale value behind corrupts the CPPR a later icp_accept()
             * derives from it. */
            ss->pending_priority = 0xff;
            qemu_irq_lower(ss->output);
            ics_reject(icp->ics, old_xisr);
        }
    } else {
        /* More interruptible now: retry delivery if nothing is pending */
        if (!XISR(ss)) {
            icp_resend(icp, server);
        }
    }
}
113

    
114
/*
 * Update @server's MFRR (IPI request register); if the new value is
 * favored enough to beat the current processor priority, try to
 * deliver the IPI at once.
 */
static void icp_set_mfrr(struct icp_state *icp, int server, uint8_t mfrr)
{
    struct icp_server_state *state = &icp->ss[server];

    state->mfrr = mfrr;
    if (mfrr < CPPR(state)) {
        icp_check_ipi(icp, server);
    }
}
123

    
124
static uint32_t icp_accept(struct icp_server_state *ss)
125
{
126
    uint32_t xirr = ss->xirr;
127

    
128
    qemu_irq_lower(ss->output);
129
    ss->xirr = ss->pending_priority << 24;
130

    
131
    trace_xics_icp_accept(xirr, ss->xirr);
132

    
133
    return xirr;
134
}
135

    
136
/*
 * End-of-interrupt for @server: restore the CPPR field from the value
 * the guest passes back in @xirr, notify the source layer, and
 * re-deliver any interrupt that may now fit under the restored CPPR.
 */
static void icp_eoi(struct icp_state *icp, int server, uint32_t xirr)
{
    struct icp_server_state *ss = icp->ss + server;

    /* Send EOI -> ICS */
    ss->xirr = (ss->xirr & ~CPPR_MASK) | (xirr & CPPR_MASK);
    trace_xics_icp_eoi(server, xirr, ss->xirr);
    ics_eoi(icp->ics, xirr & XISR_MASK);
    /* Nothing pending: give rejected/deferred interrupts another chance */
    if (!XISR(ss)) {
        icp_resend(icp, server);
    }
}
148

    
149
/*
 * Present interrupt @nr at @priority to @server.  The interrupt is
 * bounced back to the source layer if the CPPR masks it or an
 * at-least-as-favored interrupt is already pending in XISR.
 */
static void icp_irq(struct icp_state *icp, int server, int nr, uint8_t priority)
{
    struct icp_server_state *ss = icp->ss + server;

    trace_xics_icp_irq(server, nr, priority);

    if ((priority >= CPPR(ss))
        || (XISR(ss) && (ss->pending_priority <= priority))) {
        /* Not favored enough to be delivered right now */
        ics_reject(icp->ics, nr);
    } else {
        /* Displace a less favored pending interrupt, if any */
        if (XISR(ss)) {
            ics_reject(icp->ics, XISR(ss));
        }
        ss->xirr = (ss->xirr & ~XISR_MASK) | (nr & XISR_MASK);
        ss->pending_priority = priority;
        trace_xics_icp_raise(ss->xirr, ss->pending_priority);
        qemu_irq_raise(ss->output);
    }
}
168

    
169
/*
170
 * ICS: Source layer
171
 */
172

    
173
/* Per-source state kept by the source (ICS) layer. */
struct ics_irq_state {
    int server;             /* destination server (cpu_index) */
    uint8_t priority;       /* current priority; 0xff means masked */
    uint8_t saved_priority; /* priority to restore on ibm,int-on */
#define XICS_STATUS_ASSERTED           0x1  /* LSI line currently high */
#define XICS_STATUS_SENT               0x2  /* LSI delivered, awaiting EOI */
#define XICS_STATUS_REJECTED           0x4  /* MSI bounced by the ICP */
#define XICS_STATUS_MASKED_PENDING     0x8  /* MSI latched while masked */
    uint8_t status;         /* combination of XICS_STATUS_* flags */
};
183

    
184
/* Source layer: tracks every device interrupt source. */
struct ics_state {
    int nr_irqs;                /* number of sources */
    int offset;                 /* global irq number of the first source */
    qemu_irq *qirqs;            /* inbound lines, one per source */
    bool *islsi;                /* per-source: true = level-triggered (LSI),
                                 * false = message-signalled (MSI) */
    struct ics_irq_state *irqs; /* per-source state, indexed by srcno */
    struct icp_state *icp;      /* back-pointer to the presentation layer */
};
192

    
193
static int ics_valid_irq(struct ics_state *ics, uint32_t nr)
194
{
195
    return (nr >= ics->offset)
196
        && (nr < (ics->offset + ics->nr_irqs));
197
}
198

    
199
static void resend_msi(struct ics_state *ics, int srcno)
200
{
201
    struct ics_irq_state *irq = ics->irqs + srcno;
202

    
203
    /* FIXME: filter by server#? */
204
    if (irq->status & XICS_STATUS_REJECTED) {
205
        irq->status &= ~XICS_STATUS_REJECTED;
206
        if (irq->priority != 0xff) {
207
            icp_irq(ics->icp, irq->server, srcno + ics->offset,
208
                    irq->priority);
209
        }
210
    }
211
}
212

    
213
static void resend_lsi(struct ics_state *ics, int srcno)
214
{
215
    struct ics_irq_state *irq = ics->irqs + srcno;
216

    
217
    if ((irq->priority != 0xff)
218
        && (irq->status & XICS_STATUS_ASSERTED)
219
        && !(irq->status & XICS_STATUS_SENT)) {
220
        irq->status |= XICS_STATUS_SENT;
221
        icp_irq(ics->icp, irq->server, srcno + ics->offset, irq->priority);
222
    }
223
}
224

    
225
static void set_irq_msi(struct ics_state *ics, int srcno, int val)
226
{
227
    struct ics_irq_state *irq = ics->irqs + srcno;
228

    
229
    trace_xics_set_irq_msi(srcno, srcno + ics->offset);
230

    
231
    if (val) {
232
        if (irq->priority == 0xff) {
233
            irq->status |= XICS_STATUS_MASKED_PENDING;
234
            trace_xics_masked_pending();
235
        } else  {
236
            icp_irq(ics->icp, irq->server, srcno + ics->offset, irq->priority);
237
        }
238
    }
239
}
240

    
241
static void set_irq_lsi(struct ics_state *ics, int srcno, int val)
242
{
243
    struct ics_irq_state *irq = ics->irqs + srcno;
244

    
245
    trace_xics_set_irq_lsi(srcno, srcno + ics->offset);
246
    if (val) {
247
        irq->status |= XICS_STATUS_ASSERTED;
248
    } else {
249
        irq->status &= ~XICS_STATUS_ASSERTED;
250
    }
251
    resend_lsi(ics, srcno);
252
}
253

    
254
static void ics_set_irq(void *opaque, int srcno, int val)
255
{
256
    struct ics_state *ics = (struct ics_state *)opaque;
257

    
258
    if (ics->islsi[srcno]) {
259
        set_irq_lsi(ics, srcno, val);
260
    } else {
261
        set_irq_msi(ics, srcno, val);
262
    }
263
}
264

    
265
static void write_xive_msi(struct ics_state *ics, int srcno)
266
{
267
    struct ics_irq_state *irq = ics->irqs + srcno;
268

    
269
    if (!(irq->status & XICS_STATUS_MASKED_PENDING)
270
        || (irq->priority == 0xff)) {
271
        return;
272
    }
273

    
274
    irq->status &= ~XICS_STATUS_MASKED_PENDING;
275
    icp_irq(ics->icp, irq->server, srcno + ics->offset, irq->priority);
276
}
277

    
278
/* Routing/priority update for an LSI source: nothing is latched, so
 * simply re-evaluate whether the (possibly still asserted) line can be
 * presented under the new priority. */
static void write_xive_lsi(struct ics_state *ics, int srcno)
{
    resend_lsi(ics, srcno);
}
282

    
283
static void ics_write_xive(struct ics_state *ics, int nr, int server,
284
                           uint8_t priority, uint8_t saved_priority)
285
{
286
    int srcno = nr - ics->offset;
287
    struct ics_irq_state *irq = ics->irqs + srcno;
288

    
289
    irq->server = server;
290
    irq->priority = priority;
291
    irq->saved_priority = saved_priority;
292

    
293
    trace_xics_ics_write_xive(nr, srcno, server, priority);
294

    
295
    if (ics->islsi[srcno]) {
296
        write_xive_lsi(ics, srcno);
297
    } else {
298
        write_xive_msi(ics, srcno);
299
    }
300
}
301

    
302
static void ics_reject(struct ics_state *ics, int nr)
303
{
304
    struct ics_irq_state *irq = ics->irqs + nr - ics->offset;
305

    
306
    trace_xics_ics_reject(nr, nr - ics->offset);
307
    irq->status |= XICS_STATUS_REJECTED; /* Irrelevant but harmless for LSI */
308
    irq->status &= ~XICS_STATUS_SENT; /* Irrelevant but harmless for MSI */
309
}
310

    
311
static void ics_resend(struct ics_state *ics)
312
{
313
    int i;
314

    
315
    for (i = 0; i < ics->nr_irqs; i++) {
316
        /* FIXME: filter by server#? */
317
        if (ics->islsi[i]) {
318
            resend_lsi(ics, i);
319
        } else {
320
            resend_msi(ics, i);
321
        }
322
    }
323
}
324

    
325
static void ics_eoi(struct ics_state *ics, int nr)
326
{
327
    int srcno = nr - ics->offset;
328
    struct ics_irq_state *irq = ics->irqs + srcno;
329

    
330
    trace_xics_ics_eoi(nr);
331

    
332
    if (ics->islsi[srcno]) {
333
        irq->status &= ~XICS_STATUS_SENT;
334
    }
335
}
336

    
337
/*
338
 * Exported functions
339
 */
340

    
341
qemu_irq xics_get_qirq(struct icp_state *icp, int irq)
342
{
343
    if (!ics_valid_irq(icp->ics, irq)) {
344
        return NULL;
345
    }
346

    
347
    return icp->ics->qirqs[irq - icp->ics->offset];
348
}
349

    
350
void xics_set_irq_type(struct icp_state *icp, int irq, bool lsi)
351
{
352
    assert(ics_valid_irq(icp->ics, irq));
353

    
354
    icp->ics->islsi[irq - icp->ics->offset] = lsi;
355
}
356

    
357
/* H_CPPR hypercall: set the calling CPU's current processor priority. */
static target_ulong h_cppr(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                           target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);

    icp_set_cppr(spapr->icp, cs->cpu_index, args[0]);
    return H_SUCCESS;
}
366

    
367
/* H_IPI hypercall: set the target server's MFRR, which may trigger an
 * inter-processor interrupt.  args[0] = server, args[1] = mfrr. */
static target_ulong h_ipi(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                          target_ulong opcode, target_ulong *args)
{
    target_ulong server = args[0];

    if (server >= spapr->icp->nr_servers) {
        return H_PARAMETER;
    }

    icp_set_mfrr(spapr->icp, server, args[1]);
    return H_SUCCESS;
}
380

    
381
/* H_XIRR hypercall: accept the highest-priority pending interrupt and
 * return the old XIRR value to the guest in args[0]. */
static target_ulong h_xirr(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                           target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);
    struct icp_server_state *ss = spapr->icp->ss + cs->cpu_index;

    args[0] = icp_accept(ss);
    return H_SUCCESS;
}
390

    
391
/* H_EOI hypercall: signal end-of-interrupt for the XIRR in args[0]. */
static target_ulong h_eoi(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                          target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);

    icp_eoi(spapr->icp, cs->cpu_index, args[0]);
    return H_SUCCESS;
}
400

    
401
/* RTAS ibm,set-xive: set destination server and priority for one
 * interrupt.  Returns -3 (parameter error) for bad arg counts or
 * out-of-range values. */
static void rtas_set_xive(sPAPREnvironment *spapr, uint32_t token,
                          uint32_t nargs, target_ulong args,
                          uint32_t nret, target_ulong rets)
{
    struct ics_state *ics = spapr->icp->ics;
    uint32_t nr;
    uint32_t server;
    uint32_t priority;

    if (nargs != 3 || nret != 1) {
        rtas_st(rets, 0, -3);
        return;
    }

    nr = rtas_ld(args, 0);
    server = rtas_ld(args, 1);
    priority = rtas_ld(args, 2);

    if (!ics_valid_irq(ics, nr)
        || (server >= ics->icp->nr_servers)
        || (priority > 0xff)) {
        rtas_st(rets, 0, -3);
        return;
    }

    /* ibm,set-xive overwrites the saved priority as well */
    ics_write_xive(ics, nr, server, priority, priority);

    rtas_st(rets, 0, 0); /* Success */
}
427

    
428
/* RTAS ibm,get-xive: report the destination server and priority of one
 * interrupt.  rets = { status, server, priority }. */
static void rtas_get_xive(sPAPREnvironment *spapr, uint32_t token,
                          uint32_t nargs, target_ulong args,
                          uint32_t nret, target_ulong rets)
{
    struct ics_state *ics = spapr->icp->ics;
    struct ics_irq_state *irq;
    uint32_t nr;

    if (nargs != 1 || nret != 3) {
        rtas_st(rets, 0, -3);
        return;
    }

    nr = rtas_ld(args, 0);

    if (!ics_valid_irq(ics, nr)) {
        rtas_st(rets, 0, -3);
        return;
    }

    irq = &ics->irqs[nr - ics->offset];
    rtas_st(rets, 0, 0); /* Success */
    rtas_st(rets, 1, irq->server);
    rtas_st(rets, 2, irq->priority);
}
451

    
452
/* RTAS ibm,int-off: mask an interrupt (priority 0xff), stashing its
 * current priority so ibm,int-on can restore it. */
static void rtas_int_off(sPAPREnvironment *spapr, uint32_t token,
                         uint32_t nargs, target_ulong args,
                         uint32_t nret, target_ulong rets)
{
    struct ics_state *ics = spapr->icp->ics;
    struct ics_irq_state *irq;
    uint32_t nr;

    if (nargs != 1 || nret != 1) {
        rtas_st(rets, 0, -3);
        return;
    }

    nr = rtas_ld(args, 0);

    if (!ics_valid_irq(ics, nr)) {
        rtas_st(rets, 0, -3);
        return;
    }

    irq = &ics->irqs[nr - ics->offset];
    ics_write_xive(ics, nr, irq->server, 0xff, irq->priority);

    rtas_st(rets, 0, 0); /* Success */
}
476

    
477
/* RTAS ibm,int-on: unmask an interrupt by restoring the priority saved
 * at ibm,int-off time. */
static void rtas_int_on(sPAPREnvironment *spapr, uint32_t token,
                        uint32_t nargs, target_ulong args,
                        uint32_t nret, target_ulong rets)
{
    struct ics_state *ics = spapr->icp->ics;
    struct ics_irq_state *irq;
    uint32_t nr;

    if (nargs != 1 || nret != 1) {
        rtas_st(rets, 0, -3);
        return;
    }

    nr = rtas_ld(args, 0);

    if (!ics_valid_irq(ics, nr)) {
        rtas_st(rets, 0, -3);
        return;
    }

    irq = &ics->irqs[nr - ics->offset];
    ics_write_xive(ics, nr, irq->server, irq->saved_priority,
                   irq->saved_priority);

    rtas_st(rets, 0, 0); /* Success */
}
502

    
503
/* System reset handler: return both the presentation and source layers
 * to their power-on state. */
static void xics_reset(void *opaque)
{
    struct icp_state *icp = (struct icp_state *)opaque;
    struct ics_state *ics = icp->ics;
    int i;

    for (i = 0; i < icp->nr_servers; i++) {
        icp->ss[i].xirr = 0;
        icp->ss[i].pending_priority = 0xff;
        icp->ss[i].mfrr = 0xff;
        /* Make sure all outputs are deasserted */
        qemu_set_irq(icp->ss[i].output, 0);
    }

    /* All sources start masked (priority 0xff) with no state latched */
    memset(ics->irqs, 0, sizeof(struct ics_irq_state) * ics->nr_irqs);
    for (i = 0; i < ics->nr_irqs; i++) {
        ics->irqs[i].priority = 0xff;
        ics->irqs[i].saved_priority = 0xff;
    }
}
523

    
524
/* Wire one CPU's external interrupt input to its ICP server slot.
 * Aborts if the CPU bus model has no XICS-compatible interrupt pin. */
void xics_cpu_setup(struct icp_state *icp, PowerPCCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    struct icp_server_state *ss;

    assert(cs->cpu_index < icp->nr_servers);
    ss = &icp->ss[cs->cpu_index];

    switch (PPC_INPUT(env)) {
    case PPC_FLAGS_INPUT_POWER7:
        ss->output = env->irq_inputs[POWER7_INPUT_INT];
        break;

    case PPC_FLAGS_INPUT_970:
        ss->output = env->irq_inputs[PPC970_INPUT_INT];
        break;

    default:
        fprintf(stderr, "XICS interrupt controller does not support this CPU "
                "bus model\n");
        abort();
    }
}
547

    
548
struct icp_state *xics_system_init(int nr_servers, int nr_irqs)
549
{
550
    struct icp_state *icp;
551
    struct ics_state *ics;
552

    
553
    icp = g_malloc0(sizeof(*icp));
554
    icp->nr_servers = nr_servers;
555
    icp->ss = g_malloc0(icp->nr_servers*sizeof(struct icp_server_state));
556

    
557
    ics = g_malloc0(sizeof(*ics));
558
    ics->nr_irqs = nr_irqs;
559
    ics->offset = XICS_IRQ_BASE;
560
    ics->irqs = g_malloc0(nr_irqs * sizeof(struct ics_irq_state));
561
    ics->islsi = g_malloc0(nr_irqs * sizeof(bool));
562

    
563
    icp->ics = ics;
564
    ics->icp = icp;
565

    
566
    ics->qirqs = qemu_allocate_irqs(ics_set_irq, ics, nr_irqs);
567

    
568
    spapr_register_hypercall(H_CPPR, h_cppr);
569
    spapr_register_hypercall(H_IPI, h_ipi);
570
    spapr_register_hypercall(H_XIRR, h_xirr);
571
    spapr_register_hypercall(H_EOI, h_eoi);
572

    
573
    spapr_rtas_register("ibm,set-xive", rtas_set_xive);
574
    spapr_rtas_register("ibm,get-xive", rtas_get_xive);
575
    spapr_rtas_register("ibm,int-off", rtas_int_off);
576
    spapr_rtas_register("ibm,int-on", rtas_int_on);
577

    
578
    qemu_register_reset(xics_reset, icp);
579

    
580
    return icp;
581
}