/*
 * ARM Generic/Distributed Interrupt Controller
 *
 * Copyright (c) 2006-2007 CodeSourcery.
 * Written by Paul Brook
 *
 * This code is licensed under the GPL.
 */

/* This file contains implementation code for the RealView EB interrupt
 * controller, MPCore distributed interrupt controller and ARMv7-M
 * Nested Vectored Interrupt Controller.
 * It is compiled in two ways:
 *  (1) as a standalone file to produce a sysbus device which is a GIC
 *  that can be used on the realview board and as one of the builtin
 *  private peripherals for the ARM MP CPUs (11MPCore, A9, etc)
 *  (2) by being directly #included into armv7m_nvic.c to produce the
 *  armv7m_nvic device.
 */
20 |
|
21 |
#include "sysbus.h"

/* Maximum number of possible interrupts, determined by the GIC architecture */
#define GIC_MAXIRQ 1020
/* First 32 are private to each CPU (SGIs and PPIs). */
#define GIC_INTERNAL 32
/* Maximum number of possible CPU interfaces, determined by GIC architecture */
#define NCPU 8

//#define DEBUG_GIC

#ifdef DEBUG_GIC
#define DPRINTF(fmt, ...) \
do { printf("arm_gic: " fmt , ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) do {} while(0)
#endif

#ifdef NVIC
/* The NVIC has 16 internal vectors.  However these are not exposed
   through the normal GIC interface.  */
#define GIC_BASE_IRQ 32
#else
#define GIC_BASE_IRQ 0
#endif

/* GIC identification register values (read back at offsets 0xfe0..).  */
static const uint8_t gic_id[] = {
    0x90, 0x13, 0x04, 0x00, 0x0d, 0xf0, 0x05, 0xb1
};

#define FROM_SYSBUSGIC(type, dev) \
    DO_UPCAST(type, gic, FROM_SYSBUS(gic_state, dev))

/* Per-interrupt state.  Each multi-bit field is a mask indexed by CPU
 * number (bit n == CPU n); model/trigger configuration is shared.
 */
typedef struct gic_irq_state
{
    /* The enable bits are only banked for per-cpu interrupts.  */
    unsigned enabled:NCPU;
    unsigned pending:NCPU;
    unsigned active:NCPU;
    unsigned level:NCPU;
    unsigned model:1; /* 0 = N:N, 1 = 1:N */
    unsigned trigger:1; /* nonzero = edge triggered.  */
} gic_irq_state;

#define ALL_CPU_MASK ((unsigned)(((1 << NCPU) - 1)))
#define NUM_CPU(s) ((s)->num_cpu)

/* Accessors for gic_irq_state.  "cm" is a mask of CPU interface bits;
 * all of these expect a gic_state pointer named "s" in scope.
 */
#define GIC_SET_ENABLED(irq, cm) s->irq_state[irq].enabled |= (cm)
#define GIC_CLEAR_ENABLED(irq, cm) s->irq_state[irq].enabled &= ~(cm)
#define GIC_TEST_ENABLED(irq, cm) ((s->irq_state[irq].enabled & (cm)) != 0)
#define GIC_SET_PENDING(irq, cm) s->irq_state[irq].pending |= (cm)
#define GIC_CLEAR_PENDING(irq, cm) s->irq_state[irq].pending &= ~(cm)
#define GIC_TEST_PENDING(irq, cm) ((s->irq_state[irq].pending & (cm)) != 0)
#define GIC_SET_ACTIVE(irq, cm) s->irq_state[irq].active |= (cm)
#define GIC_CLEAR_ACTIVE(irq, cm) s->irq_state[irq].active &= ~(cm)
#define GIC_TEST_ACTIVE(irq, cm) ((s->irq_state[irq].active & (cm)) != 0)
#define GIC_SET_MODEL(irq) s->irq_state[irq].model = 1
#define GIC_CLEAR_MODEL(irq) s->irq_state[irq].model = 0
#define GIC_TEST_MODEL(irq) s->irq_state[irq].model
/* Bug fix: OR in the CPU's level bit instead of overwriting the whole
 * mask.  Plain "=" dropped the level state of the other CPUs for banked
 * (per-CPU) interrupts; GIC_CLEAR_LEVEL/GIC_TEST_LEVEL already treat
 * "level" as a per-CPU bitmask.
 */
#define GIC_SET_LEVEL(irq, cm) s->irq_state[irq].level |= (cm)
#define GIC_CLEAR_LEVEL(irq, cm) s->irq_state[irq].level &= ~(cm)
#define GIC_TEST_LEVEL(irq, cm) ((s->irq_state[irq].level & (cm)) != 0)
#define GIC_SET_TRIGGER(irq) s->irq_state[irq].trigger = 1
#define GIC_CLEAR_TRIGGER(irq) s->irq_state[irq].trigger = 0
#define GIC_TEST_TRIGGER(irq) s->irq_state[irq].trigger
/* SGI/PPI priorities are banked per CPU; SPI priorities are shared.  */
#define GIC_GET_PRIORITY(irq, cpu) (((irq) < GIC_INTERNAL) ?            \
                                    s->priority1[irq][cpu] :            \
                                    s->priority2[(irq) - GIC_INTERNAL])
#ifdef NVIC
#define GIC_TARGET(irq) 1
#else
#define GIC_TARGET(irq) s->irq_target[irq]
#endif
|
94 |
|
95 |
/* State of the whole GIC device: the distributor plus up to NCPU CPU
 * interfaces.  The NVIC build reuses this structure as well.
 */
typedef struct gic_state
{
    SysBusDevice busdev;
    qemu_irq parent_irq[NCPU];  /* outgoing IRQ line per CPU interface */
    int enabled;                /* distributor enable bit */
    int cpu_enabled[NCPU];      /* per-CPU interface enable bit */

    gic_irq_state irq_state[GIC_MAXIRQ];
    int irq_target[GIC_MAXIRQ]; /* CPU target mask for each interrupt */
    int priority1[GIC_INTERNAL][NCPU];        /* banked SGI/PPI priorities */
    int priority2[GIC_MAXIRQ - GIC_INTERNAL]; /* shared SPI priorities */
    /* Per-CPU chain of preempted interrupts, terminated by 1023: used by
     * acknowledge/complete to restore the previously running interrupt.
     */
    int last_active[GIC_MAXIRQ][NCPU];

    int priority_mask[NCPU];    /* only priorities <= mask may signal */
    int running_irq[NCPU];      /* interrupt being serviced; 1023 == none */
    int running_priority[NCPU]; /* its priority; 0x100 == idle */
    int current_pending[NCPU];  /* best pending interrupt; 1023 == none */

    uint32_t num_cpu;

    MemoryRegion iomem; /* Distributor */
    /* This is just so we can have an opaque pointer which identifies
     * both this GIC and which CPU interface we should be accessing.
     */
    struct gic_state *backref[NCPU];
    MemoryRegion cpuiomem[NCPU+1]; /* CPU interfaces */
    uint32_t num_irq;
} gic_state;
123 |
|
124 |
/* Return the index of the CPU currently performing the access.
 * Uniprocessor configurations always report CPU 0.
 */
static inline int gic_get_current_cpu(gic_state *s)
{
    return (s->num_cpu > 1) ? cpu_single_env->cpu_index : 0;
}
131 |
|
132 |
/* TODO: Many places that call this routine could be optimized. */
|
133 |
/* Update interrupt status after enabled or pending bits have been changed. */
|
134 |
static void gic_update(gic_state *s) |
135 |
{ |
136 |
int best_irq;
|
137 |
int best_prio;
|
138 |
int irq;
|
139 |
int level;
|
140 |
int cpu;
|
141 |
int cm;
|
142 |
|
143 |
for (cpu = 0; cpu < NUM_CPU(s); cpu++) { |
144 |
cm = 1 << cpu;
|
145 |
s->current_pending[cpu] = 1023;
|
146 |
if (!s->enabled || !s->cpu_enabled[cpu]) {
|
147 |
qemu_irq_lower(s->parent_irq[cpu]); |
148 |
return;
|
149 |
} |
150 |
best_prio = 0x100;
|
151 |
best_irq = 1023;
|
152 |
for (irq = 0; irq < s->num_irq; irq++) { |
153 |
if (GIC_TEST_ENABLED(irq, cm) && GIC_TEST_PENDING(irq, cm)) {
|
154 |
if (GIC_GET_PRIORITY(irq, cpu) < best_prio) {
|
155 |
best_prio = GIC_GET_PRIORITY(irq, cpu); |
156 |
best_irq = irq; |
157 |
} |
158 |
} |
159 |
} |
160 |
level = 0;
|
161 |
if (best_prio <= s->priority_mask[cpu]) {
|
162 |
s->current_pending[cpu] = best_irq; |
163 |
if (best_prio < s->running_priority[cpu]) {
|
164 |
DPRINTF("Raised pending IRQ %d\n", best_irq);
|
165 |
level = 1;
|
166 |
} |
167 |
} |
168 |
qemu_set_irq(s->parent_irq[cpu], level); |
169 |
} |
170 |
} |
171 |
|
172 |
#ifdef NVIC
|
173 |
/* Mark a banked (per-CPU) interrupt pending on the given CPU and
 * recompute the interrupt outputs.  Already-pending IRQs are a no-op.
 */
static void gic_set_pending_private(gic_state *s, int cpu, int irq)
{
    int mask = 1 << cpu;

    if (!GIC_TEST_PENDING(irq, mask)) {
        DPRINTF("Set %d pending cpu %d\n", irq, cpu);
        GIC_SET_PENDING(irq, mask);
        gic_update(s);
    }
}
184 |
#endif
|
185 |
|
186 |
/* Process a change in an external IRQ input.  */
static void gic_set_irq(void *opaque, int irq, int level)
{
    /* Meaning of the 'irq' parameter:
     *  [0..N-1] : external interrupts
     *  [N..N+31] : PPI (internal) interrupts for CPU 0
     *  [N+32..N+63] : PPI (internal interrupts for CPU 1
     *  ...
     */
    gic_state *s = (gic_state *)opaque;
    int cm, target;
    if (irq < (s->num_irq - GIC_INTERNAL)) {
        /* The first external input line is internal interrupt 32.  */
        cm = ALL_CPU_MASK;
        irq += GIC_INTERNAL;
        target = GIC_TARGET(irq);
    } else {
        /* Per-CPU PPI input: decode which CPU's bank it belongs to.  */
        int cpu;
        irq -= (s->num_irq - GIC_INTERNAL);
        cpu = irq / GIC_INTERNAL;
        irq %= GIC_INTERNAL;
        cm = 1 << cpu;
        target = cm;
    }

    /* No change in the input line: nothing to do.  (GIC_TEST_LEVEL
     * yields 0/1, so this assumes callers pass level as 0 or 1.)
     */
    if (level == GIC_TEST_LEVEL(irq, cm)) {
        return;
    }

    if (level) {
        GIC_SET_LEVEL(irq, cm);
        /* Edge-triggered interrupts latch pending on the rising edge;
         * level-triggered ones only become pending while enabled.
         */
        if (GIC_TEST_TRIGGER(irq) || GIC_TEST_ENABLED(irq, cm)) {
            DPRINTF("Set %d pending mask %x\n", irq, target);
            GIC_SET_PENDING(irq, target);
        }
    } else {
        GIC_CLEAR_LEVEL(irq, cm);
    }
    gic_update(s);
}
226 |
|
227 |
/* Make 'irq' the interrupt currently being serviced on 'cpu' and refresh
 * the IRQ output lines.  1023 means "no interrupt", which maps to the
 * idle running priority 0x100 (numerically below any real priority).
 */
static void gic_set_running_irq(gic_state *s, int cpu, int irq)
{
    s->running_irq[cpu] = irq;
    s->running_priority[cpu] =
        (irq == 1023) ? 0x100 : GIC_GET_PRIORITY(irq, cpu);
    gic_update(s);
}
237 |
|
238 |
/* CPU interface: acknowledge the highest-priority pending interrupt
 * (an Interrupt Acknowledge read).  Returns 1023 (spurious) if nothing
 * is pending or the best candidate does not preempt the running one.
 */
static uint32_t gic_acknowledge_irq(gic_state *s, int cpu)
{
    int new_irq;
    int cm = 1 << cpu;
    new_irq = s->current_pending[cpu];
    if (new_irq == 1023
            || GIC_GET_PRIORITY(new_irq, cpu) >= s->running_priority[cpu]) {
        DPRINTF("ACK no pending IRQ\n");
        return 1023;
    }
    /* Remember the interrupt we preempt so gic_complete_irq can restore
     * it when this one is completed.
     */
    s->last_active[new_irq][cpu] = s->running_irq[cpu];
    /* Clear pending flags for both level and edge triggered interrupts.
       Level triggered IRQs will be reasserted once they become inactive.  */
    GIC_CLEAR_PENDING(new_irq, GIC_TEST_MODEL(new_irq) ? ALL_CPU_MASK : cm);
    gic_set_running_irq(s, cpu, new_irq);
    DPRINTF("ACK %d\n", new_irq);
    return new_irq;
}
256 |
|
257 |
/* CPU interface: complete (end-of-interrupt) the given interrupt.  */
static void gic_complete_irq(gic_state * s, int cpu, int irq)
{
    int update = 0;
    int cm = 1 << cpu;
    DPRINTF("EOI %d\n", irq);
    if (irq >= s->num_irq) {
        /* This handles two cases:
         * 1. If software writes the ID of a spurious interrupt [ie 1023]
         * to the GICC_EOIR, the GIC ignores that write.
         * 2. If software writes the number of a non-existent interrupt
         * this must be a subcase of "value written does not match the last
         * valid interrupt value read from the Interrupt Acknowledge
         * register" and so this is UNPREDICTABLE. We choose to ignore it.
         */
        return;
    }
    if (s->running_irq[cpu] == 1023)
        return; /* No active IRQ.  */
    /* Mark level triggered interrupts as pending if they are still
       raised.  */
    if (!GIC_TEST_TRIGGER(irq) && GIC_TEST_ENABLED(irq, cm)
            && GIC_TEST_LEVEL(irq, cm) && (GIC_TARGET(irq) & cm) != 0) {
        DPRINTF("Set %d pending mask %x\n", irq, cm);
        GIC_SET_PENDING(irq, cm);
        update = 1;
    }
    if (irq != s->running_irq[cpu]) {
        /* Complete an IRQ that is not currently running.  */
        /* Walk the last_active chain hanging off the running interrupt
         * and unlink 'irq' from it (the chain is terminated by 1023).
         */
        int tmp = s->running_irq[cpu];
        while (s->last_active[tmp][cpu] != 1023) {
            if (s->last_active[tmp][cpu] == irq) {
                s->last_active[tmp][cpu] = s->last_active[irq][cpu];
                break;
            }
            tmp = s->last_active[tmp][cpu];
        }
        if (update) {
            gic_update(s);
        }
    } else {
        /* Complete the current running IRQ.  */
        /* Resume whichever interrupt this one preempted (or go idle).  */
        gic_set_running_irq(s, cpu, s->last_active[s->running_irq[cpu]][cpu]);
    }
}
301 |
|
302 |
/* 8-bit read from the distributor register bank.  */
static uint32_t gic_dist_readb(void *opaque, target_phys_addr_t offset)
{
    gic_state *s = (gic_state *)opaque;
    uint32_t res;
    int irq;
    int i;
    int cpu;
    int cm;
    int mask;

    cpu = gic_get_current_cpu(s);
    cm = 1 << cpu;
    if (offset < 0x100) {
        /* Control / type / (RAZ) security registers.  */
        if (offset == 0)
            return s->enabled;
        if (offset == 4)
            /* Type register: ITLinesNumber == (num_irq / 32) - 1 in the
             * low bits, number of CPUs - 1 in bits [7:5].
             */
            return ((s->num_irq / 32) - 1) | ((NUM_CPU(s) - 1) << 5);
        if (offset < 0x08)
            return 0;
        if (offset >= 0x80) {
            /* Interrupt Security , RAZ/WI */
            return 0;
        }
        goto bad_reg;
    } else if (offset < 0x200) {
        /* Interrupt Set/Clear Enable.  */
        if (offset < 0x180)
            irq = (offset - 0x100) * 8;
        else
            irq = (offset - 0x180) * 8;
        irq += GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        res = 0;
        for (i = 0; i < 8; i++) {
            if (GIC_TEST_ENABLED(irq + i, cm)) {
                res |= (1 << i);
            }
        }
    } else if (offset < 0x300) {
        /* Interrupt Set/Clear Pending.  */
        if (offset < 0x280)
            irq = (offset - 0x200) * 8;
        else
            irq = (offset - 0x280) * 8;
        irq += GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        res = 0;
        /* Banked interrupts report only this CPU's pending bit.  */
        mask = (irq < GIC_INTERNAL) ? cm : ALL_CPU_MASK;
        for (i = 0; i < 8; i++) {
            if (GIC_TEST_PENDING(irq + i, mask)) {
                res |= (1 << i);
            }
        }
    } else if (offset < 0x400) {
        /* Interrupt Active.  */
        irq = (offset - 0x300) * 8 + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        res = 0;
        mask = (irq < GIC_INTERNAL) ? cm : ALL_CPU_MASK;
        for (i = 0; i < 8; i++) {
            if (GIC_TEST_ACTIVE(irq + i, mask)) {
                res |= (1 << i);
            }
        }
    } else if (offset < 0x800) {
        /* Interrupt Priority.  */
        irq = (offset - 0x400) + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        res = GIC_GET_PRIORITY(irq, cpu);
#ifndef NVIC
    } else if (offset < 0xc00) {
        /* Interrupt CPU Target.  */
        irq = (offset - 0x800) + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        if (irq >= 29 && irq <= 31) {
            /* These banked interrupts always target the reading CPU.  */
            res = cm;
        } else {
            res = GIC_TARGET(irq);
        }
#endif
    } else if (offset < 0xf00) {
        /* Interrupt Configuration.  */
        /* Bug fix: each configuration byte covers 4 interrupts (2 bits
         * each), so the scale factor is 4 — matching gic_dist_writeb.
         * The old factor of 2 made reads decode the wrong interrupt.
         */
        irq = (offset - 0xc00) * 4 + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        res = 0;
        for (i = 0; i < 4; i++) {
            if (GIC_TEST_MODEL(irq + i))
                res |= (1 << (i * 2));
            if (GIC_TEST_TRIGGER(irq + i))
                res |= (2 << (i * 2));
        }
    } else if (offset < 0xfe0) {
        goto bad_reg;
    } else /* offset >= 0xfe0 */ {
        /* Identification registers; byte-aligned reads only.  */
        if (offset & 3) {
            res = 0;
        } else {
            res = gic_id[(offset - 0xfe0) >> 2];
        }
    }
    return res;
bad_reg:
    hw_error("gic_dist_readb: Bad offset %x\n", (int)offset);
    return 0;
}
413 |
|
414 |
/* 16-bit distributor read, assembled little-endian from two byte reads.  */
static uint32_t gic_dist_readw(void *opaque, target_phys_addr_t offset)
{
    uint32_t lo = gic_dist_readb(opaque, offset);
    uint32_t hi = gic_dist_readb(opaque, offset + 1);

    return lo | (hi << 8);
}
421 |
|
422 |
/* 32-bit distributor read, assembled little-endian from two 16-bit reads.  */
static uint32_t gic_dist_readl(void *opaque, target_phys_addr_t offset)
{
    uint32_t lo = gic_dist_readw(opaque, offset);
    uint32_t hi = gic_dist_readw(opaque, offset + 2);

    return lo | (hi << 16);
}
429 |
|
430 |
/* 8-bit write to the distributor register bank.  */
static void gic_dist_writeb(void *opaque, target_phys_addr_t offset,
                            uint32_t value)
{
    gic_state *s = (gic_state *)opaque;
    int irq;
    int i;
    int cpu;

    cpu = gic_get_current_cpu(s);
    if (offset < 0x100) {
        if (offset == 0) {
            s->enabled = (value & 1);
            DPRINTF("Distribution %sabled\n", s->enabled ? "En" : "Dis");
        } else if (offset < 4) {
            /* ignored.  */
        } else if (offset >= 0x80) {
            /* Interrupt Security Registers, RAZ/WI */
        } else {
            goto bad_reg;
        }
    } else if (offset < 0x180) {
        /* Interrupt Set Enable.  */
        irq = (offset - 0x100) * 8 + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        if (irq < 16)
            value = 0xff;   /* SGIs are permanently enabled */
        for (i = 0; i < 8; i++) {
            if (value & (1 << i)) {
                /* Bug fix: the mask must be derived from the interrupt
                 * actually being enabled (irq + i), not the base of the
                 * byte, or SPIs get marked pending for the wrong CPUs.
                 */
                int mask = (irq + i < GIC_INTERNAL) ? (1 << cpu)
                                                    : GIC_TARGET(irq + i);
                int cm = (irq + i < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK;

                if (!GIC_TEST_ENABLED(irq + i, cm)) {
                    DPRINTF("Enabled IRQ %d\n", irq + i);
                }
                GIC_SET_ENABLED(irq + i, cm);
                /* If a raised level triggered IRQ enabled then mark
                   is as pending.  */
                if (GIC_TEST_LEVEL(irq + i, mask)
                        && !GIC_TEST_TRIGGER(irq + i)) {
                    DPRINTF("Set %d pending mask %x\n", irq + i, mask);
                    GIC_SET_PENDING(irq + i, mask);
                }
            }
        }
    } else if (offset < 0x200) {
        /* Interrupt Clear Enable.  */
        irq = (offset - 0x180) * 8 + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        if (irq < 16)
            value = 0;      /* SGIs cannot be disabled */
        for (i = 0; i < 8; i++) {
            if (value & (1 << i)) {
                int cm = (irq + i < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK;

                if (GIC_TEST_ENABLED(irq + i, cm)) {
                    DPRINTF("Disabled IRQ %d\n", irq + i);
                }
                GIC_CLEAR_ENABLED(irq + i, cm);
            }
        }
    } else if (offset < 0x280) {
        /* Interrupt Set Pending.  */
        irq = (offset - 0x200) * 8 + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        if (irq < 16) {
            /* Bug fix: set-pending writes for SGIs are ignored.  The
             * old code set "irq = 0" here instead of clearing the value,
             * which made such writes set pending state on IRQs 0-7.
             */
            value = 0;
        }

        for (i = 0; i < 8; i++) {
            if (value & (1 << i)) {
                /* Bug fix: use the target of irq + i, not of the base irq.  */
                GIC_SET_PENDING(irq + i, GIC_TARGET(irq + i));
            }
        }
    } else if (offset < 0x300) {
        /* Interrupt Clear Pending.  */
        irq = (offset - 0x280) * 8 + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        for (i = 0; i < 8; i++) {
            /* ??? This currently clears the pending bit for all CPUs, even
               for per-CPU interrupts.  It's unclear whether this is the
               corect behavior.  */
            if (value & (1 << i)) {
                GIC_CLEAR_PENDING(irq + i, ALL_CPU_MASK);
            }
        }
    } else if (offset < 0x400) {
        /* Interrupt Active.  */
        goto bad_reg;
    } else if (offset < 0x800) {
        /* Interrupt Priority.  */
        irq = (offset - 0x400) + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        if (irq < GIC_INTERNAL) {
            s->priority1[irq][cpu] = value;
        } else {
            s->priority2[irq - GIC_INTERNAL] = value;
        }
#ifndef NVIC
    } else if (offset < 0xc00) {
        /* Interrupt CPU Target.  */
        irq = (offset - 0x800) + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        if (irq < 29)
            value = 0;
        else if (irq < GIC_INTERNAL)
            value = ALL_CPU_MASK;
        s->irq_target[irq] = value & ALL_CPU_MASK;
#endif
    } else if (offset < 0xf00) {
        /* Interrupt Configuration.  */
        irq = (offset - 0xc00) * 4 + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        if (irq < GIC_INTERNAL)
            value |= 0xaa;  /* force the trigger bits set for SGIs/PPIs */
        for (i = 0; i < 4; i++) {
            if (value & (1 << (i * 2))) {
                GIC_SET_MODEL(irq + i);
            } else {
                GIC_CLEAR_MODEL(irq + i);
            }
            if (value & (2 << (i * 2))) {
                GIC_SET_TRIGGER(irq + i);
            } else {
                GIC_CLEAR_TRIGGER(irq + i);
            }
        }
    } else {
        /* 0xf00 is only handled for 32-bit writes.  */
        goto bad_reg;
    }
    gic_update(s);
    return;
bad_reg:
    hw_error("gic_dist_writeb: Bad offset %x\n", (int)offset);
}
571 |
|
572 |
/* 16-bit distributor write, decomposed into two byte writes.  Note the
 * second write deliberately passes the unmasked shifted value; the byte
 * handler only examines the bits it needs.
 */
static void gic_dist_writew(void *opaque, target_phys_addr_t offset,
                            uint32_t value)
{
    gic_dist_writeb(opaque, offset, value & 0xff);
    gic_dist_writeb(opaque, offset + 1, value >> 8);
}
578 |
|
579 |
/* 32-bit distributor write.  The software-generated interrupt register
 * at 0xf00 is handled here directly; everything else is decomposed into
 * 16-bit writes.
 */
static void gic_dist_writel(void *opaque, target_phys_addr_t offset,
                            uint32_t value)
{
    gic_state *s = (gic_state *)opaque;
    if (offset == 0xf00) {
        int cpu;
        int irq;
        int mask;

        cpu = gic_get_current_cpu(s);
        irq = value & 0x3ff;
        /* Bits [25:24] select the target filter.  */
        switch ((value >> 24) & 3) {
        case 0:
            /* Explicit CPU target list in bits [23:16].  */
            mask = (value >> 16) & ALL_CPU_MASK;
            break;
        case 1:
            /* All CPUs except the requesting one.  */
            mask = ALL_CPU_MASK ^ (1 << cpu);
            break;
        case 2:
            /* Only the requesting CPU.  */
            mask = 1 << cpu;
            break;
        default:
            DPRINTF("Bad Soft Int target filter\n");
            mask = ALL_CPU_MASK;
            break;
        }
        GIC_SET_PENDING(irq, mask);
        gic_update(s);
        return;
    }
    gic_dist_writew(opaque, offset, value & 0xffff);
    gic_dist_writew(opaque, offset + 2, value >> 16);
}
612 |
|
613 |
/* Distributor MMIO region: legacy byte/word/long accessor table.  */
static const MemoryRegionOps gic_dist_ops = {
    .old_mmio = {
        .read = { gic_dist_readb, gic_dist_readw, gic_dist_readl, },
        .write = { gic_dist_writeb, gic_dist_writew, gic_dist_writel, },
    },
    .endianness = DEVICE_NATIVE_ENDIAN,
};
620 |
|
621 |
#ifndef NVIC
|
622 |
/* Read a register of the given CPU's GIC CPU interface.  Unknown
 * offsets are reported via hw_error.
 */
static uint32_t gic_cpu_read(gic_state *s, int cpu, int offset)
{
    if (offset == 0x00) {        /* Control */
        return s->cpu_enabled[cpu];
    } else if (offset == 0x04) { /* Priority mask */
        return s->priority_mask[cpu];
    } else if (offset == 0x08) { /* Binary Point */
        /* ??? Not implemented.  */
        return 0;
    } else if (offset == 0x0c) { /* Acknowledge */
        return gic_acknowledge_irq(s, cpu);
    } else if (offset == 0x14) { /* Running Priority */
        return s->running_priority[cpu];
    } else if (offset == 0x18) { /* Highest Pending Interrupt */
        return s->current_pending[cpu];
    }
    hw_error("gic_cpu_read: Bad offset %x\n", (int)offset);
    return 0;
}
643 |
|
644 |
/* Write a register of the given CPU's GIC CPU interface.  */
static void gic_cpu_write(gic_state *s, int cpu, int offset, uint32_t value)
{
    switch (offset) {
    case 0x00: /* Control */
        s->cpu_enabled[cpu] = (value & 1);
        /* Bug fix: report this CPU's enable bit; the old code tested
         * the array pointer (s->cpu_enabled), which is always true.
         */
        DPRINTF("CPU %d %sabled\n", cpu, s->cpu_enabled[cpu] ? "En" : "Dis");
        break;
    case 0x04: /* Priority mask */
        s->priority_mask[cpu] = (value & 0xff);
        break;
    case 0x08: /* Binary Point */
        /* ??? Not implemented.  */
        break;
    case 0x10: /* End Of Interrupt */
        /* gic_complete_irq() performs its own update; skip the common
         * gic_update() below.  (Also avoids the non-conforming
         * "return void-expression" the old code used.)
         */
        gic_complete_irq(s, cpu, value & 0x3ff);
        return;
    default:
        hw_error("gic_cpu_write: Bad offset %x\n", (int)offset);
        return;
    }
    gic_update(s);
}
665 |
|
666 |
/* Wrappers to read/write the GIC CPU interface for the current CPU */
|
667 |
static uint64_t gic_thiscpu_read(void *opaque, target_phys_addr_t addr, |
668 |
unsigned size)
|
669 |
{ |
670 |
gic_state *s = (gic_state *)opaque; |
671 |
return gic_cpu_read(s, gic_get_current_cpu(s), addr);
|
672 |
} |
673 |
|
674 |
/* Write to the CPU interface of whichever CPU is currently executing.  */
static void gic_thiscpu_write(void *opaque, target_phys_addr_t addr,
                              uint64_t value, unsigned size)
{
    gic_state *s = opaque;
    int cpu = gic_get_current_cpu(s);

    gic_cpu_write(s, cpu, addr, value);
}
680 |
|
681 |
/* Wrappers to read/write the GIC CPU interface for a specific CPU.
|
682 |
* These just decode the opaque pointer into gic_state* + cpu id.
|
683 |
*/
|
684 |
static uint64_t gic_do_cpu_read(void *opaque, target_phys_addr_t addr, |
685 |
unsigned size)
|
686 |
{ |
687 |
gic_state **backref = (gic_state **)opaque; |
688 |
gic_state *s = *backref; |
689 |
int id = (backref - s->backref);
|
690 |
return gic_cpu_read(s, id, addr);
|
691 |
} |
692 |
|
693 |
/* Write the CPU interface of a specific CPU; the CPU id is recovered
 * from the position of the opaque pointer within s->backref[].
 */
static void gic_do_cpu_write(void *opaque, target_phys_addr_t addr,
                             uint64_t value, unsigned size)
{
    gic_state **entry = opaque;
    gic_state *s = *entry;

    gic_cpu_write(s, entry - s->backref, addr, value);
}
701 |
|
702 |
/* MMIO ops for the "CPU interface of the currently executing CPU" alias.  */
static const MemoryRegionOps gic_thiscpu_ops = {
    .read = gic_thiscpu_read,
    .write = gic_thiscpu_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
707 |
|
708 |
/* MMIO ops for the per-CPU interface regions (opaque is &amp;s->backref[cpu]).  */
static const MemoryRegionOps gic_cpu_ops = {
    .read = gic_do_cpu_read,
    .write = gic_do_cpu_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
713 |
#endif
|
714 |
|
715 |
/* Device reset: clear all per-interrupt state, idle every CPU interface,
 * re-enable the 16 SGIs (always enabled, edge-triggered) and disable the
 * distributor.
 */
static void gic_reset(DeviceState *dev)
{
    gic_state *s = FROM_SYSBUS(gic_state, sysbus_from_qdev(dev));
    int i;
    memset(s->irq_state, 0, GIC_MAXIRQ * sizeof(gic_irq_state));
    for (i = 0 ; i < NUM_CPU(s); i++) {
        s->priority_mask[i] = 0xf0;
        s->current_pending[i] = 1023;   /* no pending interrupt */
        s->running_irq[i] = 1023;       /* no running interrupt */
        s->running_priority[i] = 0x100; /* idle priority */
        s->cpu_enabled[i] = 0;
    }
    for (i = 0; i < 16; i++) {
        GIC_SET_ENABLED(i, ALL_CPU_MASK);
        GIC_SET_TRIGGER(i);
    }
    s->enabled = 0;
}
733 |
|
734 |
/* Savevm callback: serialize the GIC state (stream version 3).  The
 * field order here is the wire format and must match gic_load exactly.
 */
static void gic_save(QEMUFile *f, void *opaque)
{
    gic_state *s = (gic_state *)opaque;
    int i;
    int j;

    qemu_put_be32(f, s->enabled);
    for (i = 0; i < NUM_CPU(s); i++) {
        qemu_put_be32(f, s->cpu_enabled[i]);
        /* Banked SGI/PPI priorities for this CPU.  */
        for (j = 0; j < GIC_INTERNAL; j++)
            qemu_put_be32(f, s->priority1[j][i]);
        for (j = 0; j < s->num_irq; j++)
            qemu_put_be32(f, s->last_active[j][i]);
        qemu_put_be32(f, s->priority_mask[i]);
        qemu_put_be32(f, s->running_irq[i]);
        qemu_put_be32(f, s->running_priority[i]);
        qemu_put_be32(f, s->current_pending[i]);
    }
    /* Shared SPI priorities.  */
    for (i = 0; i < s->num_irq - GIC_INTERNAL; i++) {
        qemu_put_be32(f, s->priority2[i]);
    }
    /* Per-interrupt flags: each byte carries the NCPU-wide bitmask
     * (or single-bit model/trigger flag) from the bitfield struct.
     */
    for (i = 0; i < s->num_irq; i++) {
        qemu_put_be32(f, s->irq_target[i]);
        qemu_put_byte(f, s->irq_state[i].enabled);
        qemu_put_byte(f, s->irq_state[i].pending);
        qemu_put_byte(f, s->irq_state[i].active);
        qemu_put_byte(f, s->irq_state[i].level);
        qemu_put_byte(f, s->irq_state[i].model);
        qemu_put_byte(f, s->irq_state[i].trigger);
    }
}
765 |
|
766 |
/* Loadvm callback: restore state written by gic_save.  Only version 3
 * streams are accepted; returns 0 on success, -EINVAL otherwise.
 */
static int gic_load(QEMUFile *f, void *opaque, int version_id)
{
    gic_state *s = (gic_state *)opaque;
    int i;
    int j;

    if (version_id != 3) {
        return -EINVAL;
    }

    /* Field order mirrors gic_save exactly.  */
    s->enabled = qemu_get_be32(f);
    for (i = 0; i < NUM_CPU(s); i++) {
        s->cpu_enabled[i] = qemu_get_be32(f);
        for (j = 0; j < GIC_INTERNAL; j++)
            s->priority1[j][i] = qemu_get_be32(f);
        for (j = 0; j < s->num_irq; j++)
            s->last_active[j][i] = qemu_get_be32(f);
        s->priority_mask[i] = qemu_get_be32(f);
        s->running_irq[i] = qemu_get_be32(f);
        s->running_priority[i] = qemu_get_be32(f);
        s->current_pending[i] = qemu_get_be32(f);
    }
    for (i = 0; i < s->num_irq - GIC_INTERNAL; i++) {
        s->priority2[i] = qemu_get_be32(f);
    }
    for (i = 0; i < s->num_irq; i++) {
        s->irq_target[i] = qemu_get_be32(f);
        s->irq_state[i].enabled = qemu_get_byte(f);
        s->irq_state[i].pending = qemu_get_byte(f);
        s->irq_state[i].active = qemu_get_byte(f);
        s->irq_state[i].level = qemu_get_byte(f);
        s->irq_state[i].model = qemu_get_byte(f);
        s->irq_state[i].trigger = qemu_get_byte(f);
    }

    return 0;
}
803 |
|
804 |
/* Common initialization for the GIC and NVIC device models: validate
 * the configuration, wire up the incoming GPIO (interrupt) lines and
 * per-CPU outgoing IRQ lines, create the MMIO regions and register the
 * savevm handlers.
 */
static void gic_init(gic_state *s, int num_irq)
{
    int i;

    if (s->num_cpu > NCPU) {
        hw_error("requested %u CPUs exceeds GIC maximum %d\n",
                 s->num_cpu, NCPU);
    }
    s->num_irq = num_irq + GIC_BASE_IRQ;
    if (s->num_irq > GIC_MAXIRQ) {
        hw_error("requested %u interrupt lines exceeds GIC maximum %d\n",
                 num_irq, GIC_MAXIRQ);
    }
    /* ITLinesNumber is represented as (N / 32) - 1 (see
     * gic_dist_readb) so this is an implementation imposed
     * restriction, not an architectural one:
     */
    if (s->num_irq < 32 || (s->num_irq % 32)) {
        hw_error("%d interrupt lines unsupported: not divisible by 32\n",
                 num_irq);
    }

    i = s->num_irq - GIC_INTERNAL;
#ifndef NVIC
    /* For the GIC, also expose incoming GPIO lines for PPIs for each CPU.
     * GPIO array layout is thus:
     *  [0..N-1] SPIs
     *  [N..N+31] PPIs for CPU 0
     *  [N+32..N+63] PPIs for CPU 1
     *   ...
     */
    i += (GIC_INTERNAL * s->num_cpu);
#endif
    qdev_init_gpio_in(&s->busdev.qdev, gic_set_irq, i);
    for (i = 0; i < NUM_CPU(s); i++) {
        sysbus_init_irq(&s->busdev, &s->parent_irq[i]);
    }
    memory_region_init_io(&s->iomem, &gic_dist_ops, s, "gic_dist", 0x1000);
#ifndef NVIC
    /* Memory regions for the CPU interfaces (NVIC doesn't have these):
     * a region for "CPU interface for this core", then a region for
     * "CPU interface for core 0", "for core 1", ...
     * NB that the memory region size of 0x100 applies for the 11MPCore
     * and also cores following the GIC v1 spec (ie A9).
     * GIC v2 defines a larger memory region (0x1000) so this will need
     * to be extended when we implement A15.
     */
    memory_region_init_io(&s->cpuiomem[0], &gic_thiscpu_ops, s,
                          "gic_cpu", 0x100);
    for (i = 0; i < NUM_CPU(s); i++) {
        s->backref[i] = s;
        memory_region_init_io(&s->cpuiomem[i+1], &gic_cpu_ops, &s->backref[i],
                              "gic_cpu", 0x100);
    }
#endif

    register_savevm(NULL, "arm_gic", -1, 3, gic_save, gic_load, s);
}
862 |
|
863 |
#ifndef NVIC
|
864 |
|
865 |
/* Device instance init function for the GIC sysbus device: set up the
 * common GIC state, then expose the distributor region followed by the
 * "current CPU" interface region and one region per CPU interface.
 */
static int arm_gic_init(SysBusDevice *dev)
{
    gic_state *s = FROM_SYSBUS(gic_state, dev);
    int region;

    gic_init(s, s->num_irq);
    sysbus_init_mmio(dev, &s->iomem);                 /* Distributor */
    for (region = 0; region <= NUM_CPU(s); region++) {
        sysbus_init_mmio(dev, &s->cpuiomem[region]);  /* CPU interfaces */
    }
    return 0;
}
879 |
|
880 |
/* qdev properties: boards configure the number of CPU interfaces and
 * implemented interrupt lines before device init.
 */
static Property arm_gic_properties[] = {
    DEFINE_PROP_UINT32("num-cpu", gic_state, num_cpu, 1),
    DEFINE_PROP_UINT32("num-irq", gic_state, num_irq, 32),
    DEFINE_PROP_END_OF_LIST(),
};
885 |
|
886 |
/* QOM class init: hook up the sysbus init function, reset handler and
 * the qdev properties.
 */
static void arm_gic_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SysBusDeviceClass *sbc = SYS_BUS_DEVICE_CLASS(klass);
    sbc->init = arm_gic_init;
    dc->props = arm_gic_properties;
    dc->reset = gic_reset;
    dc->no_user = 1; /* board/SoC code instantiates this, not the user */
}
895 |
|
896 |
/* TypeInfo for the standalone "arm_gic" sysbus device.  */
static TypeInfo arm_gic_info = {
    .name = "arm_gic",
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(gic_state),
    .class_init = arm_gic_class_init,
};
902 |
|
903 |
/* Register the arm_gic QOM type at program startup.  */
static void arm_gic_register_types(void)
{
    type_register_static(&arm_gic_info);
}

type_init(arm_gic_register_types)
909 |
|
910 |
#endif
|