Statistics
| Branch: | Revision:

root / hw / xics.c @ 22a2611c

History | View | Annotate | Download (15.1 kB)

1
/*
2
 * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator
3
 *
4
 * PAPR Virtualized Interrupt System, aka ICS/ICP aka xics
5
 *
6
 * Copyright (c) 2010,2011 David Gibson, IBM Corporation.
7
 *
8
 * Permission is hereby granted, free of charge, to any person obtaining a copy
9
 * of this software and associated documentation files (the "Software"), to deal
10
 * in the Software without restriction, including without limitation the rights
11
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12
 * copies of the Software, and to permit persons to whom the Software is
13
 * furnished to do so, subject to the following conditions:
14
 *
15
 * The above copyright notice and this permission notice shall be included in
16
 * all copies or substantial portions of the Software.
17
 *
18
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
24
 * THE SOFTWARE.
25
 *
26
 */
27

    
28
#include "hw.h"
29
#include "trace.h"
30
#include "hw/spapr.h"
31
#include "hw/xics.h"
32

    
33
/*
34
 * ICP: Presentation layer
35
 */
36

    
37
struct icp_server_state {
38
    uint32_t xirr;
39
    uint8_t pending_priority;
40
    uint8_t mfrr;
41
    qemu_irq output;
42
};
43

    
44
/* XIRR is a packed word: bits 0-23 are the XISR (pending interrupt source
 * number), bits 24-31 are the CPPR (current processor priority). */
#define XISR_MASK  0x00ffffff
#define CPPR_MASK  0xff000000

/* Field accessors for a struct icp_server_state pointer */
#define XISR(ss)   (((ss)->xirr) & XISR_MASK)
#define CPPR(ss)   (((ss)->xirr) >> 24)
49

    
50
/* Forward declaration: the presentation layer keeps a link back to the
 * source layer, which is defined further down. */
struct ics_state;

/* ICP (presentation layer) state: one icp_server_state per server (CPU),
 * plus a pointer to the single interrupt source controller. */
struct icp_state {
    long nr_servers;
    struct icp_server_state *ss;
    struct ics_state *ics;
};
57

    
58
/* Source-layer entry points invoked from the presentation layer below. */
static void ics_reject(struct ics_state *ics, int nr);
static void ics_resend(struct ics_state *ics);
static void ics_eoi(struct ics_state *ics, int nr);
61

    
62
static void icp_check_ipi(struct icp_state *icp, int server)
63
{
64
    struct icp_server_state *ss = icp->ss + server;
65

    
66
    if (XISR(ss) && (ss->pending_priority <= ss->mfrr)) {
67
        return;
68
    }
69

    
70
    trace_xics_icp_check_ipi(server, ss->mfrr);
71

    
72
    if (XISR(ss)) {
73
        ics_reject(icp->ics, XISR(ss));
74
    }
75

    
76
    ss->xirr = (ss->xirr & ~XISR_MASK) | XICS_IPI;
77
    ss->pending_priority = ss->mfrr;
78
    qemu_irq_raise(ss->output);
79
}
80

    
81
static void icp_resend(struct icp_state *icp, int server)
82
{
83
    struct icp_server_state *ss = icp->ss + server;
84

    
85
    if (ss->mfrr < CPPR(ss)) {
86
        icp_check_ipi(icp, server);
87
    }
88
    ics_resend(icp->ics);
89
}
90

    
91
static void icp_set_cppr(struct icp_state *icp, int server, uint8_t cppr)
92
{
93
    struct icp_server_state *ss = icp->ss + server;
94
    uint8_t old_cppr;
95
    uint32_t old_xisr;
96

    
97
    old_cppr = CPPR(ss);
98
    ss->xirr = (ss->xirr & ~CPPR_MASK) | (cppr << 24);
99

    
100
    if (cppr < old_cppr) {
101
        if (XISR(ss) && (cppr <= ss->pending_priority)) {
102
            old_xisr = XISR(ss);
103
            ss->xirr &= ~XISR_MASK; /* Clear XISR */
104
            qemu_irq_lower(ss->output);
105
            ics_reject(icp->ics, old_xisr);
106
        }
107
    } else {
108
        if (!XISR(ss)) {
109
            icp_resend(icp, server);
110
        }
111
    }
112
}
113

    
114
static void icp_set_mfrr(struct icp_state *icp, int server, uint8_t mfrr)
115
{
116
    struct icp_server_state *ss = icp->ss + server;
117

    
118
    ss->mfrr = mfrr;
119
    if (mfrr < CPPR(ss)) {
120
        icp_check_ipi(icp, server);
121
    }
122
}
123

    
124
static uint32_t icp_accept(struct icp_server_state *ss)
125
{
126
    uint32_t xirr = ss->xirr;
127

    
128
    qemu_irq_lower(ss->output);
129
    ss->xirr = ss->pending_priority << 24;
130

    
131
    trace_xics_icp_accept(xirr, ss->xirr);
132

    
133
    return xirr;
134
}
135

    
136
static void icp_eoi(struct icp_state *icp, int server, uint32_t xirr)
137
{
138
    struct icp_server_state *ss = icp->ss + server;
139

    
140
    /* Send EOI -> ICS */
141
    ss->xirr = (ss->xirr & ~CPPR_MASK) | (xirr & CPPR_MASK);
142
    trace_xics_icp_eoi(server, xirr, ss->xirr);
143
    ics_eoi(icp->ics, xirr & XISR_MASK);
144
    if (!XISR(ss)) {
145
        icp_resend(icp, server);
146
    }
147
}
148

    
149
static void icp_irq(struct icp_state *icp, int server, int nr, uint8_t priority)
150
{
151
    struct icp_server_state *ss = icp->ss + server;
152

    
153
    trace_xics_icp_irq(server, nr, priority);
154

    
155
    if ((priority >= CPPR(ss))
156
        || (XISR(ss) && (ss->pending_priority <= priority))) {
157
        ics_reject(icp->ics, nr);
158
    } else {
159
        if (XISR(ss)) {
160
            ics_reject(icp->ics, XISR(ss));
161
        }
162
        ss->xirr = (ss->xirr & ~XISR_MASK) | (nr & XISR_MASK);
163
        ss->pending_priority = priority;
164
        trace_xics_icp_raise(ss->xirr, ss->pending_priority);
165
        qemu_irq_raise(ss->output);
166
    }
167
}
168

    
169
/*
170
 * ICS: Source layer
171
 */
172

    
173
/* Per-source state kept by the interrupt source controller. */
struct ics_irq_state {
    int server;             /* destination server (CPU index) */
    uint8_t priority;       /* current priority, 0xff means masked */
    uint8_t saved_priority; /* priority restored by ibm,int-on */
#define XICS_STATUS_ASSERTED           0x1
#define XICS_STATUS_SENT               0x2
#define XICS_STATUS_REJECTED           0x4
#define XICS_STATUS_MASKED_PENDING     0x8
    uint8_t status;         /* combination of XICS_STATUS_* flags */
};
183

    
184
struct ics_state {
185
    int nr_irqs;
186
    int offset;
187
    qemu_irq *qirqs;
188
    bool *islsi;
189
    struct ics_irq_state *irqs;
190
    struct icp_state *icp;
191
};
192

    
193
static int ics_valid_irq(struct ics_state *ics, uint32_t nr)
194
{
195
    return (nr >= ics->offset)
196
        && (nr < (ics->offset + ics->nr_irqs));
197
}
198

    
199
static void resend_msi(struct ics_state *ics, int srcno)
200
{
201
    struct ics_irq_state *irq = ics->irqs + srcno;
202

    
203
    /* FIXME: filter by server#? */
204
    if (irq->status & XICS_STATUS_REJECTED) {
205
        irq->status &= ~XICS_STATUS_REJECTED;
206
        if (irq->priority != 0xff) {
207
            icp_irq(ics->icp, irq->server, srcno + ics->offset,
208
                    irq->priority);
209
        }
210
    }
211
}
212

    
213
static void resend_lsi(struct ics_state *ics, int srcno)
214
{
215
    struct ics_irq_state *irq = ics->irqs + srcno;
216

    
217
    if ((irq->priority != 0xff)
218
        && (irq->status & XICS_STATUS_ASSERTED)
219
        && !(irq->status & XICS_STATUS_SENT)) {
220
        irq->status |= XICS_STATUS_SENT;
221
        icp_irq(ics->icp, irq->server, srcno + ics->offset, irq->priority);
222
    }
223
}
224

    
225
static void set_irq_msi(struct ics_state *ics, int srcno, int val)
226
{
227
    struct ics_irq_state *irq = ics->irqs + srcno;
228

    
229
    trace_xics_set_irq_msi(srcno, srcno + ics->offset);
230

    
231
    if (val) {
232
        if (irq->priority == 0xff) {
233
            irq->status |= XICS_STATUS_MASKED_PENDING;
234
            trace_xics_masked_pending();
235
        } else  {
236
            icp_irq(ics->icp, irq->server, srcno + ics->offset, irq->priority);
237
        }
238
    }
239
}
240

    
241
static void set_irq_lsi(struct ics_state *ics, int srcno, int val)
242
{
243
    struct ics_irq_state *irq = ics->irqs + srcno;
244

    
245
    trace_xics_set_irq_lsi(srcno, srcno + ics->offset);
246
    if (val) {
247
        irq->status |= XICS_STATUS_ASSERTED;
248
    } else {
249
        irq->status &= ~XICS_STATUS_ASSERTED;
250
    }
251
    resend_lsi(ics, srcno);
252
}
253

    
254
static void ics_set_irq(void *opaque, int srcno, int val)
255
{
256
    struct ics_state *ics = (struct ics_state *)opaque;
257

    
258
    if (ics->islsi[srcno]) {
259
        set_irq_lsi(ics, srcno, val);
260
    } else {
261
        set_irq_msi(ics, srcno, val);
262
    }
263
}
264

    
265
static void write_xive_msi(struct ics_state *ics, int srcno)
266
{
267
    struct ics_irq_state *irq = ics->irqs + srcno;
268

    
269
    if (!(irq->status & XICS_STATUS_MASKED_PENDING)
270
        || (irq->priority == 0xff)) {
271
        return;
272
    }
273

    
274
    irq->status &= ~XICS_STATUS_MASKED_PENDING;
275
    icp_irq(ics->icp, irq->server, srcno + ics->offset, irq->priority);
276
}
277

    
278
/* After a XIVE update on an LSI source, retry delivery if it is
 * asserted and now presentable. */
static void write_xive_lsi(struct ics_state *ics, int srcno)
{
    resend_lsi(ics, srcno);
}
282

    
283
static void ics_write_xive(struct ics_state *ics, int nr, int server,
284
                           uint8_t priority, uint8_t saved_priority)
285
{
286
    int srcno = nr - ics->offset;
287
    struct ics_irq_state *irq = ics->irqs + srcno;
288

    
289
    irq->server = server;
290
    irq->priority = priority;
291
    irq->saved_priority = saved_priority;
292

    
293
    trace_xics_ics_write_xive(nr, srcno, server, priority);
294

    
295
    if (ics->islsi[srcno]) {
296
        write_xive_lsi(ics, srcno);
297
    } else {
298
        write_xive_msi(ics, srcno);
299
    }
300
}
301

    
302
static void ics_reject(struct ics_state *ics, int nr)
303
{
304
    struct ics_irq_state *irq = ics->irqs + nr - ics->offset;
305

    
306
    trace_xics_ics_reject(nr, nr - ics->offset);
307
    irq->status |= XICS_STATUS_REJECTED; /* Irrelevant but harmless for LSI */
308
    irq->status &= ~XICS_STATUS_SENT; /* Irrelevant but harmless for MSI */
309
}
310

    
311
static void ics_resend(struct ics_state *ics)
312
{
313
    int i;
314

    
315
    for (i = 0; i < ics->nr_irqs; i++) {
316
        /* FIXME: filter by server#? */
317
        if (ics->islsi[i]) {
318
            resend_lsi(ics, i);
319
        } else {
320
            resend_msi(ics, i);
321
        }
322
    }
323
}
324

    
325
static void ics_eoi(struct ics_state *ics, int nr)
326
{
327
    int srcno = nr - ics->offset;
328
    struct ics_irq_state *irq = ics->irqs + srcno;
329

    
330
    trace_xics_ics_eoi(nr);
331

    
332
    if (ics->islsi[srcno]) {
333
        irq->status &= ~XICS_STATUS_SENT;
334
    }
335
}
336

    
337
/*
338
 * Exported functions
339
 */
340

    
341
qemu_irq xics_get_qirq(struct icp_state *icp, int irq)
342
{
343
    if (!ics_valid_irq(icp->ics, irq)) {
344
        return NULL;
345
    }
346

    
347
    return icp->ics->qirqs[irq - icp->ics->offset];
348
}
349

    
350
void xics_set_irq_type(struct icp_state *icp, int irq, bool lsi)
351
{
352
    assert(ics_valid_irq(icp->ics, irq));
353

    
354
    icp->ics->islsi[irq - icp->ics->offset] = lsi;
355
}
356

    
357
/* H_CPPR hypercall: set the calling CPU's current processor priority. */
static target_ulong h_cppr(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                           target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;

    icp_set_cppr(spapr->icp, env->cpu_index, args[0]);
    return H_SUCCESS;
}
366

    
367
/* H_IPI hypercall: write the MFRR of @server after validating that the
 * server number is in range. */
static target_ulong h_ipi(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                          target_ulong opcode, target_ulong *args)
{
    target_ulong server = args[0];
    target_ulong mfrr = args[1];

    if (server >= spapr->icp->nr_servers) {
        return H_PARAMETER;
    }

    icp_set_mfrr(spapr->icp, server, mfrr);
    return H_SUCCESS;
}
381

    
382
/* H_XIRR hypercall: accept the interrupt presented to the calling CPU
 * and return its XIRR value. */
static target_ulong h_xirr(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                           target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;

    args[0] = icp_accept(&spapr->icp->ss[env->cpu_index]);
    return H_SUCCESS;
}
391

    
392
/* H_EOI hypercall: signal end-of-interrupt for the calling CPU. */
static target_ulong h_eoi(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                          target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;

    icp_eoi(spapr->icp, env->cpu_index, args[0]);
    return H_SUCCESS;
}
401

    
402
/* RTAS ibm,set-xive: set server and priority of an interrupt source. */
static void rtas_set_xive(sPAPREnvironment *spapr, uint32_t token,
                          uint32_t nargs, target_ulong args,
                          uint32_t nret, target_ulong rets)
{
    struct ics_state *ics = spapr->icp->ics;
    uint32_t nr, server_no, prio;

    if ((nargs != 3) || (nret != 1)) {
        rtas_st(rets, 0, -3);
        return;
    }

    nr = rtas_ld(args, 0);
    server_no = rtas_ld(args, 1);
    prio = rtas_ld(args, 2);

    if (!ics_valid_irq(ics, nr) || (server_no >= ics->icp->nr_servers)
        || (prio > 0xff)) {
        rtas_st(rets, 0, -3);
        return;
    }

    /* set-xive also refreshes the saved priority used by ibm,int-on */
    ics_write_xive(ics, nr, server_no, prio, prio);

    rtas_st(rets, 0, 0); /* Success */
}
428

    
429
/* RTAS ibm,get-xive: report server and priority of an interrupt source. */
static void rtas_get_xive(sPAPREnvironment *spapr, uint32_t token,
                          uint32_t nargs, target_ulong args,
                          uint32_t nret, target_ulong rets)
{
    struct ics_state *ics = spapr->icp->ics;
    struct ics_irq_state *irq;
    uint32_t nr;

    if ((nargs != 1) || (nret != 3)) {
        rtas_st(rets, 0, -3);
        return;
    }

    nr = rtas_ld(args, 0);

    if (!ics_valid_irq(ics, nr)) {
        rtas_st(rets, 0, -3);
        return;
    }

    irq = &ics->irqs[nr - ics->offset];

    rtas_st(rets, 0, 0); /* Success */
    rtas_st(rets, 1, irq->server);
    rtas_st(rets, 2, irq->priority);
}
452

    
453
/* RTAS ibm,int-off: mask a source (priority 0xff), remembering the old
 * priority so ibm,int-on can restore it. */
static void rtas_int_off(sPAPREnvironment *spapr, uint32_t token,
                         uint32_t nargs, target_ulong args,
                         uint32_t nret, target_ulong rets)
{
    struct ics_state *ics = spapr->icp->ics;
    struct ics_irq_state *irq;
    uint32_t nr;

    if ((nargs != 1) || (nret != 1)) {
        rtas_st(rets, 0, -3);
        return;
    }

    nr = rtas_ld(args, 0);

    if (!ics_valid_irq(ics, nr)) {
        rtas_st(rets, 0, -3);
        return;
    }

    irq = &ics->irqs[nr - ics->offset];
    /* Current priority becomes the saved priority */
    ics_write_xive(ics, nr, irq->server, 0xff, irq->priority);

    rtas_st(rets, 0, 0); /* Success */
}
477

    
478
/* RTAS ibm,int-on: unmask a source by restoring its saved priority. */
static void rtas_int_on(sPAPREnvironment *spapr, uint32_t token,
                        uint32_t nargs, target_ulong args,
                        uint32_t nret, target_ulong rets)
{
    struct ics_state *ics = spapr->icp->ics;
    struct ics_irq_state *irq;
    uint32_t nr;

    if ((nargs != 1) || (nret != 1)) {
        rtas_st(rets, 0, -3);
        return;
    }

    nr = rtas_ld(args, 0);

    if (!ics_valid_irq(ics, nr)) {
        rtas_st(rets, 0, -3);
        return;
    }

    irq = &ics->irqs[nr - ics->offset];
    ics_write_xive(ics, nr, irq->server, irq->saved_priority,
                   irq->saved_priority);

    rtas_st(rets, 0, 0); /* Success */
}
503

    
504
static void xics_reset(void *opaque)
505
{
506
    struct icp_state *icp = (struct icp_state *)opaque;
507
    struct ics_state *ics = icp->ics;
508
    int i;
509

    
510
    for (i = 0; i < icp->nr_servers; i++) {
511
        icp->ss[i].xirr = 0;
512
        icp->ss[i].pending_priority = 0xff;
513
        icp->ss[i].mfrr = 0xff;
514
        /* Make all outputs are deasserted */
515
        qemu_set_irq(icp->ss[i].output, 0);
516
    }
517

    
518
    memset(ics->irqs, 0, sizeof(struct ics_irq_state) * ics->nr_irqs);
519
    for (i = 0; i < ics->nr_irqs; i++) {
520
        ics->irqs[i].priority = 0xff;
521
        ics->irqs[i].saved_priority = 0xff;
522
    }
523
}
524

    
525
struct icp_state *xics_system_init(int nr_irqs)
526
{
527
    CPUPPCState *env;
528
    int max_server_num;
529
    struct icp_state *icp;
530
    struct ics_state *ics;
531

    
532
    max_server_num = -1;
533
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
534
        if (env->cpu_index > max_server_num) {
535
            max_server_num = env->cpu_index;
536
        }
537
    }
538

    
539
    icp = g_malloc0(sizeof(*icp));
540
    icp->nr_servers = max_server_num + 1;
541
    icp->ss = g_malloc0(icp->nr_servers*sizeof(struct icp_server_state));
542

    
543
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
544
        struct icp_server_state *ss = &icp->ss[env->cpu_index];
545

    
546
        switch (PPC_INPUT(env)) {
547
        case PPC_FLAGS_INPUT_POWER7:
548
            ss->output = env->irq_inputs[POWER7_INPUT_INT];
549
            break;
550

    
551
        case PPC_FLAGS_INPUT_970:
552
            ss->output = env->irq_inputs[PPC970_INPUT_INT];
553
            break;
554

    
555
        default:
556
            hw_error("XICS interrupt model does not support this CPU bus "
557
                     "model\n");
558
            exit(1);
559
        }
560
    }
561

    
562
    ics = g_malloc0(sizeof(*ics));
563
    ics->nr_irqs = nr_irqs;
564
    ics->offset = XICS_IRQ_BASE;
565
    ics->irqs = g_malloc0(nr_irqs * sizeof(struct ics_irq_state));
566
    ics->islsi = g_malloc0(nr_irqs * sizeof(bool));
567

    
568
    icp->ics = ics;
569
    ics->icp = icp;
570

    
571
    ics->qirqs = qemu_allocate_irqs(ics_set_irq, ics, nr_irqs);
572

    
573
    spapr_register_hypercall(H_CPPR, h_cppr);
574
    spapr_register_hypercall(H_IPI, h_ipi);
575
    spapr_register_hypercall(H_XIRR, h_xirr);
576
    spapr_register_hypercall(H_EOI, h_eoi);
577

    
578
    spapr_rtas_register("ibm,set-xive", rtas_set_xive);
579
    spapr_rtas_register("ibm,get-xive", rtas_get_xive);
580
    spapr_rtas_register("ibm,int-off", rtas_int_off);
581
    spapr_rtas_register("ibm,int-on", rtas_int_on);
582

    
583
    qemu_register_reset(xics_reset, icp);
584

    
585
    return icp;
586
}