/* hw/arm_gic.c @ 84e4fccb */
1 |
/*
 * ARM Generic/Distributed Interrupt Controller
 *
 * Copyright (c) 2006-2007 CodeSourcery.
 * Written by Paul Brook
 *
 * This code is licensed under the GPL.
 */

/* This file contains implementation code for the RealView EB interrupt
 * controller, MPCore distributed interrupt controller and ARMv7-M
 * Nested Vectored Interrupt Controller.
 * It is compiled in two ways:
 *  (1) as a standalone file to produce a sysbus device which is a GIC
 *  that can be used on the realview board and as one of the builtin
 *  private peripherals for the ARM MP CPUs (11MPCore, A9, etc)
 *  (2) by being directly #included into armv7m_nvic.c to produce the
 *  armv7m_nvic device.
 */
20 |
|
21 |
#include "sysbus.h"

/* Maximum number of possible interrupts, determined by the GIC architecture */
#define GIC_MAXIRQ 1020
/* First 32 are private to each CPU (SGIs and PPIs).  */
#define GIC_INTERNAL 32
/* Maximum number of possible CPU interfaces, determined by GIC architecture */
#define NCPU 8

//#define DEBUG_GIC

#ifdef DEBUG_GIC
#define DPRINTF(fmt, ...) \
do { printf("arm_gic: " fmt , ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) do {} while(0)
#endif

/* The NVIC has 16 internal vectors.  However these are not exposed
   through the normal GIC interface.  */
#define GIC_BASE_IRQ ((s->revision == REV_NVIC) ? 32 : 0)

/* Peripheral/component ID bytes returned for reads at 0xfe0.. (see
 * gic_dist_readb).  */
static const uint8_t gic_id[] = {
    0x90, 0x13, 0x04, 0x00, 0x0d, 0xf0, 0x05, 0xb1
};

#define FROM_SYSBUSGIC(type, dev) \
    DO_UPCAST(type, gic, FROM_SYSBUS(gic_state, dev))

/* Per-interrupt state.  The NCPU-wide bitfields hold one bit per CPU
 * interface, so per-CPU (banked) interrupts can differ between cores.  */
typedef struct gic_irq_state
{
    /* The enable bits are only banked for per-cpu interrupts.  */
    unsigned enabled:NCPU;
    unsigned pending:NCPU;
    unsigned active:NCPU;
    unsigned level:NCPU;
    unsigned model:1; /* 0 = N:N, 1 = 1:N */
    unsigned trigger:1; /* nonzero = edge triggered.  */
} gic_irq_state;

#define ALL_CPU_MASK ((unsigned)(((1 << NCPU) - 1)))
#define NUM_CPU(s) ((s)->num_cpu)

/* Accessors for per-IRQ state.  'cm' is a bitmask of CPU interfaces.
 * NOTE: these macros implicitly reference a gic_state pointer named 's'
 * in the enclosing scope.  */
#define GIC_SET_ENABLED(irq, cm) s->irq_state[irq].enabled |= (cm)
#define GIC_CLEAR_ENABLED(irq, cm) s->irq_state[irq].enabled &= ~(cm)
#define GIC_TEST_ENABLED(irq, cm) ((s->irq_state[irq].enabled & (cm)) != 0)
#define GIC_SET_PENDING(irq, cm) s->irq_state[irq].pending |= (cm)
#define GIC_CLEAR_PENDING(irq, cm) s->irq_state[irq].pending &= ~(cm)
#define GIC_TEST_PENDING(irq, cm) ((s->irq_state[irq].pending & (cm)) != 0)
#define GIC_SET_ACTIVE(irq, cm) s->irq_state[irq].active |= (cm)
#define GIC_CLEAR_ACTIVE(irq, cm) s->irq_state[irq].active &= ~(cm)
#define GIC_TEST_ACTIVE(irq, cm) ((s->irq_state[irq].active & (cm)) != 0)
#define GIC_SET_MODEL(irq) s->irq_state[irq].model = 1
#define GIC_CLEAR_MODEL(irq) s->irq_state[irq].model = 0
#define GIC_TEST_MODEL(irq) s->irq_state[irq].model
/* NOTE: SET_LEVEL assigns (=) while CLEAR_LEVEL masks (&= ~) -- review
 * whether SET_LEVEL should be |= like the other setters; preserved as-is.  */
#define GIC_SET_LEVEL(irq, cm) s->irq_state[irq].level = (cm)
#define GIC_CLEAR_LEVEL(irq, cm) s->irq_state[irq].level &= ~(cm)
#define GIC_TEST_LEVEL(irq, cm) ((s->irq_state[irq].level & (cm)) != 0)
#define GIC_SET_TRIGGER(irq) s->irq_state[irq].trigger = 1
#define GIC_CLEAR_TRIGGER(irq) s->irq_state[irq].trigger = 0
#define GIC_TEST_TRIGGER(irq) s->irq_state[irq].trigger
/* Priorities are banked per-CPU for internal interrupts only.  */
#define GIC_GET_PRIORITY(irq, cpu) (((irq) < GIC_INTERNAL) ?            \
                                    s->priority1[irq][cpu] :            \
                                    s->priority2[(irq) - GIC_INTERNAL])
#define GIC_TARGET(irq) s->irq_target[irq]
|
86 |
|
87 |
/* Device state for one GIC (or, when built with NVIC defined, one NVIC).  */
typedef struct gic_state
{
    SysBusDevice busdev;
    qemu_irq parent_irq[NCPU];      /* per-CPU output IRQ lines */
    int enabled;                    /* distributor global enable */
    int cpu_enabled[NCPU];          /* per-CPU interface enable */

    gic_irq_state irq_state[GIC_MAXIRQ];
    int irq_target[GIC_MAXIRQ];     /* target CPU mask per interrupt */
    int priority1[GIC_INTERNAL][NCPU];          /* banked: SGIs/PPIs */
    int priority2[GIC_MAXIRQ - GIC_INTERNAL];   /* shared: SPIs */
    /* Chain of preempted interrupts per CPU; 1023 terminates the chain.  */
    int last_active[GIC_MAXIRQ][NCPU];

    int priority_mask[NCPU];
    int running_irq[NCPU];          /* currently serviced IRQ, 1023 = none */
    int running_priority[NCPU];     /* 0x100 = idle (below any priority) */
    int current_pending[NCPU];      /* highest-priority pending, 1023 = none */

    uint32_t num_cpu;

    MemoryRegion iomem; /* Distributor */
    /* This is just so we can have an opaque pointer which identifies
     * both this GIC and which CPU interface we should be accessing.
     */
    struct gic_state *backref[NCPU];
    MemoryRegion cpuiomem[NCPU+1]; /* CPU interfaces */
    uint32_t num_irq;
    uint32_t revision;
} gic_state;

/* The special cases for the revision property: */
#define REV_11MPCORE 0
#define REV_NVIC 0xffffffff
120 |
|
121 |
/* Return the index of the CPU whose interface is being accessed.
 * A uniprocessor GIC always reports CPU 0.
 */
static inline int gic_get_current_cpu(gic_state *s)
{
    if (s->num_cpu <= 1) {
        return 0;
    }
    return cpu_single_env->cpu_index;
}
128 |
|
129 |
/* TODO: Many places that call this routine could be optimized. */
|
130 |
/* Update interrupt status after enabled or pending bits have been changed. */
|
131 |
/* TODO: Many places that call this routine could be optimized.  */
/* Update interrupt status after enabled or pending bits have been changed:
 * recompute the highest-priority pending interrupt for every CPU interface
 * and drive each CPU's parent IRQ line accordingly.
 */
static void gic_update(gic_state *s)
{
    int best_irq;
    int best_prio;
    int irq;
    int level;
    int cpu;
    int cm;

    for (cpu = 0; cpu < NUM_CPU(s); cpu++) {
        cm = 1 << cpu;
        s->current_pending[cpu] = 1023;
        if (!s->enabled || !s->cpu_enabled[cpu]) {
            /* Bug fix: this used to 'return', which skipped the update
             * for all remaining CPU interfaces and left their IRQ lines
             * stale.  Lower this CPU's line and keep iterating.
             */
            qemu_irq_lower(s->parent_irq[cpu]);
            continue;
        }
        best_prio = 0x100; /* lower than any real priority (0..0xff) */
        best_irq = 1023;
        for (irq = 0; irq < s->num_irq; irq++) {
            if (GIC_TEST_ENABLED(irq, cm) && GIC_TEST_PENDING(irq, cm)) {
                if (GIC_GET_PRIORITY(irq, cpu) < best_prio) {
                    best_prio = GIC_GET_PRIORITY(irq, cpu);
                    best_irq = irq;
                }
            }
        }
        level = 0;
        if (best_prio <= s->priority_mask[cpu]) {
            s->current_pending[cpu] = best_irq;
            /* Only signal the CPU if the new interrupt would preempt
             * whatever is currently being serviced.  */
            if (best_prio < s->running_priority[cpu]) {
                DPRINTF("Raised pending IRQ %d\n", best_irq);
                level = 1;
            }
        }
        qemu_set_irq(s->parent_irq[cpu], level);
    }
}
168 |
|
169 |
#ifdef NVIC
|
170 |
/* Mark a banked (per-CPU) interrupt pending on 'cpu' and recompute
 * the output lines.  No-op if it is already pending there.
 */
static void gic_set_pending_private(gic_state *s, int cpu, int irq)
{
    int cm = 1 << cpu;

    if (!GIC_TEST_PENDING(irq, cm)) {
        DPRINTF("Set %d pending cpu %d\n", irq, cpu);
        GIC_SET_PENDING(irq, cm);
        gic_update(s);
    }
}
181 |
#endif
|
182 |
|
183 |
/* Process a change in an external IRQ input. */
|
184 |
/* Process a change in an external IRQ input.  */
static void gic_set_irq(void *opaque, int irq, int level)
{
    /* Meaning of the 'irq' parameter:
     *  [0..N-1] : external interrupts
     *  [N..N+31] : PPI (internal) interrupts for CPU 0
     *  [N+32..N+63] : PPI (internal interrupts for CPU 1
     *  ...
     */
    gic_state *s = (gic_state *)opaque;
    int cm, target;
    if (irq < (s->num_irq - GIC_INTERNAL)) {
        /* The first external input line is internal interrupt 32.  */
        cm = ALL_CPU_MASK;
        irq += GIC_INTERNAL;
        target = GIC_TARGET(irq);
    } else {
        /* Per-CPU PPI input: decode which CPU it belongs to and map it
         * into the 0..31 internal range; it targets only that CPU.  */
        int cpu;
        irq -= (s->num_irq - GIC_INTERNAL);
        cpu = irq / GIC_INTERNAL;
        irq %= GIC_INTERNAL;
        cm = 1 << cpu;
        target = cm;
    }

    /* Ignore writes that don't change the input level.  */
    if (level == GIC_TEST_LEVEL(irq, cm)) {
        return;
    }

    if (level) {
        GIC_SET_LEVEL(irq, cm);
        /* Edge-triggered interrupts latch pending on the rising edge;
         * level-triggered ones become pending only while enabled.  */
        if (GIC_TEST_TRIGGER(irq) || GIC_TEST_ENABLED(irq, cm)) {
            DPRINTF("Set %d pending mask %x\n", irq, target);
            GIC_SET_PENDING(irq, target);
        }
    } else {
        GIC_CLEAR_LEVEL(irq, cm);
    }
    gic_update(s);
}
223 |
|
224 |
/* Record 'irq' as the interrupt now being serviced on 'cpu' and refresh
 * the running priority.  1023 means "no interrupt running", represented
 * by the idle priority 0x100 (below every real priority).
 */
static void gic_set_running_irq(gic_state *s, int cpu, int irq)
{
    s->running_irq[cpu] = irq;
    s->running_priority[cpu] =
        (irq == 1023) ? 0x100 : GIC_GET_PRIORITY(irq, cpu);
    gic_update(s);
}
234 |
|
235 |
/* CPU interface: acknowledge the highest-priority pending interrupt for
 * 'cpu' (a GICC_IAR read).  Returns the interrupt number, or 1023
 * (spurious) if nothing pending would preempt the running interrupt.
 */
static uint32_t gic_acknowledge_irq(gic_state *s, int cpu)
{
    int new_irq;
    int cm = 1 << cpu;
    new_irq = s->current_pending[cpu];
    if (new_irq == 1023
            || GIC_GET_PRIORITY(new_irq, cpu) >= s->running_priority[cpu]) {
        DPRINTF("ACK no pending IRQ\n");
        return 1023;
    }
    /* Remember what was running so gic_complete_irq can restore it.  */
    s->last_active[new_irq][cpu] = s->running_irq[cpu];
    /* Clear pending flags for both level and edge triggered interrupts.
       Level triggered IRQs will be reasserted once they become inactive.  */
    GIC_CLEAR_PENDING(new_irq, GIC_TEST_MODEL(new_irq) ? ALL_CPU_MASK : cm);
    gic_set_running_irq(s, cpu, new_irq);
    DPRINTF("ACK %d\n", new_irq);
    return new_irq;
}
253 |
|
254 |
/* CPU interface: end-of-interrupt for 'irq' on 'cpu' (a GICC_EOIR write).  */
static void gic_complete_irq(gic_state * s, int cpu, int irq)
{
    int update = 0;
    int cm = 1 << cpu;
    DPRINTF("EOI %d\n", irq);
    if (irq >= s->num_irq) {
        /* This handles two cases:
         * 1. If software writes the ID of a spurious interrupt [ie 1023]
         * to the GICC_EOIR, the GIC ignores that write.
         * 2. If software writes the number of a non-existent interrupt
         * this must be a subcase of "value written does not match the last
         * valid interrupt value read from the Interrupt Acknowledge
         * register" and so this is UNPREDICTABLE. We choose to ignore it.
         */
        return;
    }
    if (s->running_irq[cpu] == 1023)
        return; /* No active IRQ.  */
    /* Mark level triggered interrupts as pending if they are still
       raised.  */
    if (!GIC_TEST_TRIGGER(irq) && GIC_TEST_ENABLED(irq, cm)
        && GIC_TEST_LEVEL(irq, cm) && (GIC_TARGET(irq) & cm) != 0) {
        DPRINTF("Set %d pending mask %x\n", irq, cm);
        GIC_SET_PENDING(irq, cm);
        update = 1;
    }
    if (irq != s->running_irq[cpu]) {
        /* Complete an IRQ that is not currently running.  */
        /* Unlink 'irq' from the singly-linked chain of preempted
         * interrupts recorded in last_active[][] (1023 terminates it).  */
        int tmp = s->running_irq[cpu];
        while (s->last_active[tmp][cpu] != 1023) {
            if (s->last_active[tmp][cpu] == irq) {
                s->last_active[tmp][cpu] = s->last_active[irq][cpu];
                break;
            }
            tmp = s->last_active[tmp][cpu];
        }
        if (update) {
            gic_update(s);
        }
    } else {
        /* Complete the current running IRQ: resume whatever it preempted.  */
        gic_set_running_irq(s, cpu, s->last_active[s->running_irq[cpu]][cpu]);
    }
}
298 |
|
299 |
/* Handle a byte read from the GIC distributor (GICD_*) register space.
 * Reads are attributed to the CPU currently executing, which matters for
 * the banked per-CPU interrupt state.
 */
static uint32_t gic_dist_readb(void *opaque, target_phys_addr_t offset)
{
    gic_state *s = (gic_state *)opaque;
    uint32_t res;
    int irq;
    int i;
    int cpu;
    int cm;
    int mask;

    cpu = gic_get_current_cpu(s);
    cm = 1 << cpu;
    if (offset < 0x100) {
        /* Control / type / implementer registers.  */
        if (offset == 0)
            return s->enabled;
        if (offset == 4)
            /* GICD_TYPER: ITLinesNumber = (num_irq / 32) - 1, plus CPU count.  */
            return ((s->num_irq / 32) - 1) | ((NUM_CPU(s) - 1) << 5);
        if (offset < 0x08)
            return 0;
        if (offset >= 0x80) {
            /* Interrupt Security , RAZ/WI */
            return 0;
        }
        goto bad_reg;
    } else if (offset < 0x200) {
        /* Interrupt Set/Clear Enable.  Both halves read the same state.  */
        if (offset < 0x180)
            irq = (offset - 0x100) * 8;
        else
            irq = (offset - 0x180) * 8;
        irq += GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        res = 0;
        for (i = 0; i < 8; i++) {
            if (GIC_TEST_ENABLED(irq + i, cm)) {
                res |= (1 << i);
            }
        }
    } else if (offset < 0x300) {
        /* Interrupt Set/Clear Pending.  */
        if (offset < 0x280)
            irq = (offset - 0x200) * 8;
        else
            irq = (offset - 0x280) * 8;
        irq += GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        res = 0;
        /* Per-CPU interrupts report only this CPU's bit.  */
        mask = (irq < GIC_INTERNAL) ?  cm : ALL_CPU_MASK;
        for (i = 0; i < 8; i++) {
            if (GIC_TEST_PENDING(irq + i, mask)) {
                res |= (1 << i);
            }
        }
    } else if (offset < 0x400) {
        /* Interrupt Active.  */
        irq = (offset - 0x300) * 8 + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        res = 0;
        mask = (irq < GIC_INTERNAL) ?  cm : ALL_CPU_MASK;
        for (i = 0; i < 8; i++) {
            if (GIC_TEST_ACTIVE(irq + i, mask)) {
                res |= (1 << i);
            }
        }
    } else if (offset < 0x800) {
        /* Interrupt Priority.  */
        irq = (offset - 0x400) + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        res = GIC_GET_PRIORITY(irq, cpu);
    } else if (offset < 0xc00) {
        /* Interrupt CPU Target.  */
        if (s->num_cpu == 1 && s->revision != REV_11MPCORE) {
            /* For uniprocessor GICs these RAZ/WI */
            res = 0;
        } else {
            irq = (offset - 0x800) + GIC_BASE_IRQ;
            if (irq >= s->num_irq) {
                goto bad_reg;
            }
            if (irq >= 29 && irq <= 31) {
                /* IRQs 29..31 always read as targeting this CPU.  */
                res = cm;
            } else {
                res = GIC_TARGET(irq);
            }
        }
    } else if (offset < 0xf00) {
        /* Interrupt Configuration: 2 bits per interrupt (model, trigger).  */
        irq = (offset - 0xc00) * 2 + GIC_BASE_IRQ;
        if (irq >= s->num_irq)
            goto bad_reg;
        res = 0;
        for (i = 0; i < 4; i++) {
            if (GIC_TEST_MODEL(irq + i))
                res |= (1 << (i * 2));
            if (GIC_TEST_TRIGGER(irq + i))
                res |= (2 << (i * 2));
        }
    } else if (offset < 0xfe0) {
        goto bad_reg;
    } else /* offset >= 0xfe0 */ {
        /* Identification registers: one ID byte per word-aligned offset.  */
        if (offset & 3) {
            res = 0;
        } else {
            res = gic_id[(offset - 0xfe0) >> 2];
        }
    }
    return res;
bad_reg:
    hw_error("gic_dist_readb: Bad offset %x\n", (int)offset);
    return 0;
}
414 |
|
415 |
/* 16-bit distributor read, assembled from two byte-wide accesses.  */
static uint32_t gic_dist_readw(void *opaque, target_phys_addr_t offset)
{
    uint32_t lo = gic_dist_readb(opaque, offset);
    uint32_t hi = gic_dist_readb(opaque, offset + 1);
    return lo | (hi << 8);
}
422 |
|
423 |
/* 32-bit distributor read, assembled from two 16-bit accesses.  */
static uint32_t gic_dist_readl(void *opaque, target_phys_addr_t offset)
{
    uint32_t lo = gic_dist_readw(opaque, offset);
    uint32_t hi = gic_dist_readw(opaque, offset + 2);
    return lo | (hi << 16);
}
430 |
|
431 |
static void gic_dist_writeb(void *opaque, target_phys_addr_t offset, |
432 |
uint32_t value) |
433 |
{ |
434 |
gic_state *s = (gic_state *)opaque; |
435 |
int irq;
|
436 |
int i;
|
437 |
int cpu;
|
438 |
|
439 |
cpu = gic_get_current_cpu(s); |
440 |
if (offset < 0x100) { |
441 |
if (offset == 0) { |
442 |
s->enabled = (value & 1);
|
443 |
DPRINTF("Distribution %sabled\n", s->enabled ? "En" : "Dis"); |
444 |
} else if (offset < 4) { |
445 |
/* ignored. */
|
446 |
} else if (offset >= 0x80) { |
447 |
/* Interrupt Security Registers, RAZ/WI */
|
448 |
} else {
|
449 |
goto bad_reg;
|
450 |
} |
451 |
} else if (offset < 0x180) { |
452 |
/* Interrupt Set Enable. */
|
453 |
irq = (offset - 0x100) * 8 + GIC_BASE_IRQ; |
454 |
if (irq >= s->num_irq)
|
455 |
goto bad_reg;
|
456 |
if (irq < 16) |
457 |
value = 0xff;
|
458 |
for (i = 0; i < 8; i++) { |
459 |
if (value & (1 << i)) { |
460 |
int mask = (irq < GIC_INTERNAL) ? (1 << cpu) : GIC_TARGET(irq); |
461 |
int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK; |
462 |
|
463 |
if (!GIC_TEST_ENABLED(irq + i, cm)) {
|
464 |
DPRINTF("Enabled IRQ %d\n", irq + i);
|
465 |
} |
466 |
GIC_SET_ENABLED(irq + i, cm); |
467 |
/* If a raised level triggered IRQ enabled then mark
|
468 |
is as pending. */
|
469 |
if (GIC_TEST_LEVEL(irq + i, mask)
|
470 |
&& !GIC_TEST_TRIGGER(irq + i)) { |
471 |
DPRINTF("Set %d pending mask %x\n", irq + i, mask);
|
472 |
GIC_SET_PENDING(irq + i, mask); |
473 |
} |
474 |
} |
475 |
} |
476 |
} else if (offset < 0x200) { |
477 |
/* Interrupt Clear Enable. */
|
478 |
irq = (offset - 0x180) * 8 + GIC_BASE_IRQ; |
479 |
if (irq >= s->num_irq)
|
480 |
goto bad_reg;
|
481 |
if (irq < 16) |
482 |
value = 0;
|
483 |
for (i = 0; i < 8; i++) { |
484 |
if (value & (1 << i)) { |
485 |
int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK; |
486 |
|
487 |
if (GIC_TEST_ENABLED(irq + i, cm)) {
|
488 |
DPRINTF("Disabled IRQ %d\n", irq + i);
|
489 |
} |
490 |
GIC_CLEAR_ENABLED(irq + i, cm); |
491 |
} |
492 |
} |
493 |
} else if (offset < 0x280) { |
494 |
/* Interrupt Set Pending. */
|
495 |
irq = (offset - 0x200) * 8 + GIC_BASE_IRQ; |
496 |
if (irq >= s->num_irq)
|
497 |
goto bad_reg;
|
498 |
if (irq < 16) |
499 |
irq = 0;
|
500 |
|
501 |
for (i = 0; i < 8; i++) { |
502 |
if (value & (1 << i)) { |
503 |
GIC_SET_PENDING(irq + i, GIC_TARGET(irq)); |
504 |
} |
505 |
} |
506 |
} else if (offset < 0x300) { |
507 |
/* Interrupt Clear Pending. */
|
508 |
irq = (offset - 0x280) * 8 + GIC_BASE_IRQ; |
509 |
if (irq >= s->num_irq)
|
510 |
goto bad_reg;
|
511 |
for (i = 0; i < 8; i++) { |
512 |
/* ??? This currently clears the pending bit for all CPUs, even
|
513 |
for per-CPU interrupts. It's unclear whether this is the
|
514 |
corect behavior. */
|
515 |
if (value & (1 << i)) { |
516 |
GIC_CLEAR_PENDING(irq + i, ALL_CPU_MASK); |
517 |
} |
518 |
} |
519 |
} else if (offset < 0x400) { |
520 |
/* Interrupt Active. */
|
521 |
goto bad_reg;
|
522 |
} else if (offset < 0x800) { |
523 |
/* Interrupt Priority. */
|
524 |
irq = (offset - 0x400) + GIC_BASE_IRQ;
|
525 |
if (irq >= s->num_irq)
|
526 |
goto bad_reg;
|
527 |
if (irq < GIC_INTERNAL) {
|
528 |
s->priority1[irq][cpu] = value; |
529 |
} else {
|
530 |
s->priority2[irq - GIC_INTERNAL] = value; |
531 |
} |
532 |
} else if (offset < 0xc00) { |
533 |
/* Interrupt CPU Target. RAZ/WI on uniprocessor GICs, with the
|
534 |
* annoying exception of the 11MPCore's GIC.
|
535 |
*/
|
536 |
if (s->num_cpu != 1 || s->revision == REV_11MPCORE) { |
537 |
irq = (offset - 0x800) + GIC_BASE_IRQ;
|
538 |
if (irq >= s->num_irq) {
|
539 |
goto bad_reg;
|
540 |
} |
541 |
if (irq < 29) { |
542 |
value = 0;
|
543 |
} else if (irq < GIC_INTERNAL) { |
544 |
value = ALL_CPU_MASK; |
545 |
} |
546 |
s->irq_target[irq] = value & ALL_CPU_MASK; |
547 |
} |
548 |
} else if (offset < 0xf00) { |
549 |
/* Interrupt Configuration. */
|
550 |
irq = (offset - 0xc00) * 4 + GIC_BASE_IRQ; |
551 |
if (irq >= s->num_irq)
|
552 |
goto bad_reg;
|
553 |
if (irq < GIC_INTERNAL)
|
554 |
value |= 0xaa;
|
555 |
for (i = 0; i < 4; i++) { |
556 |
if (value & (1 << (i * 2))) { |
557 |
GIC_SET_MODEL(irq + i); |
558 |
} else {
|
559 |
GIC_CLEAR_MODEL(irq + i); |
560 |
} |
561 |
if (value & (2 << (i * 2))) { |
562 |
GIC_SET_TRIGGER(irq + i); |
563 |
} else {
|
564 |
GIC_CLEAR_TRIGGER(irq + i); |
565 |
} |
566 |
} |
567 |
} else {
|
568 |
/* 0xf00 is only handled for 32-bit writes. */
|
569 |
goto bad_reg;
|
570 |
} |
571 |
gic_update(s); |
572 |
return;
|
573 |
bad_reg:
|
574 |
hw_error("gic_dist_writeb: Bad offset %x\n", (int)offset); |
575 |
} |
576 |
|
577 |
/* 16-bit distributor write, split into two byte-wide accesses.  */
static void gic_dist_writew(void *opaque, target_phys_addr_t offset,
                            uint32_t value)
{
    gic_dist_writeb(opaque, offset, value & 0xff);
    gic_dist_writeb(opaque, offset + 1, value >> 8);
}
583 |
|
584 |
/* 32-bit distributor write.  Handles GICD_SGIR (0xf00, software-generated
 * interrupts) directly; everything else is split into 16-bit accesses.
 */
static void gic_dist_writel(void *opaque, target_phys_addr_t offset,
                            uint32_t value)
{
    gic_state *s = (gic_state *)opaque;
    if (offset == 0xf00) {
        int cpu;
        int irq;
        int mask;

        cpu = gic_get_current_cpu(s);
        irq = value & 0x3ff;
        /* Target list filter (bits 25:24): explicit list, all-but-self,
         * or self only.  */
        switch ((value >> 24) & 3) {
        case 0:
            mask = (value >> 16) & ALL_CPU_MASK;
            break;
        case 1:
            mask = ALL_CPU_MASK ^ (1 << cpu);
            break;
        case 2:
            mask = 1 << cpu;
            break;
        default:
            DPRINTF("Bad Soft Int target filter\n");
            mask = ALL_CPU_MASK;
            break;
        }
        GIC_SET_PENDING(irq, mask);
        gic_update(s);
        return;
    }
    gic_dist_writew(opaque, offset, value & 0xffff);
    gic_dist_writew(opaque, offset + 2, value >> 16);
}
617 |
|
618 |
/* Distributor region: uses the legacy old_mmio byte/word/long accessors.  */
static const MemoryRegionOps gic_dist_ops = {
    .old_mmio = {
        .read = { gic_dist_readb, gic_dist_readw, gic_dist_readl, },
        .write = { gic_dist_writeb, gic_dist_writew, gic_dist_writel, },
    },
    .endianness = DEVICE_NATIVE_ENDIAN,
};
625 |
|
626 |
#ifndef NVIC
|
627 |
/* Handle a read from the GICC_* (CPU interface) registers of 'cpu'.  */
static uint32_t gic_cpu_read(gic_state *s, int cpu, int offset)
{
    switch (offset) {
    case 0x00: /* Control */
        return s->cpu_enabled[cpu];
    case 0x04: /* Priority mask */
        return s->priority_mask[cpu];
    case 0x08: /* Binary Point */
        /* ??? Not implemented.  */
        return 0;
    case 0x0c: /* Acknowledge */
        return gic_acknowledge_irq(s, cpu);
    case 0x14: /* Running Priority */
        return s->running_priority[cpu];
    case 0x18: /* Highest Pending Interrupt */
        return s->current_pending[cpu];
    default:
        hw_error("gic_cpu_read: Bad offset %x\n", (int)offset);
        return 0;
    }
}
648 |
|
649 |
/* Handle a write to the GICC_* (CPU interface) registers of 'cpu'.  */
static void gic_cpu_write(gic_state *s, int cpu, int offset, uint32_t value)
{
    switch (offset) {
    case 0x00: /* Control */
        s->cpu_enabled[cpu] = (value & 1);
        /* Bug fix: this used to test s->cpu_enabled (the array itself,
         * which decays to an always-true pointer), so the debug message
         * always read "Enabled".  Test this CPU's flag instead.
         */
        DPRINTF("CPU %d %sabled\n", cpu, s->cpu_enabled[cpu] ? "En" : "Dis");
        break;
    case 0x04: /* Priority mask */
        s->priority_mask[cpu] = (value & 0xff);
        break;
    case 0x08: /* Binary Point */
        /* ??? Not implemented.  */
        break;
    case 0x10: /* End Of Interrupt */
        return gic_complete_irq(s, cpu, value & 0x3ff);
    default:
        hw_error("gic_cpu_write: Bad offset %x\n", (int)offset);
        return;
    }
    gic_update(s);
}
670 |
|
671 |
/* Wrappers to read/write the GIC CPU interface for the current CPU */
|
672 |
/* Forward a read of the "current CPU" interface region to the banked
 * interface of whichever CPU is executing.  */
static uint64_t gic_thiscpu_read(void *opaque, target_phys_addr_t addr,
                                 unsigned size)
{
    gic_state *gic = opaque;
    return gic_cpu_read(gic, gic_get_current_cpu(gic), addr);
}
678 |
|
679 |
static void gic_thiscpu_write(void *opaque, target_phys_addr_t addr, |
680 |
uint64_t value, unsigned size)
|
681 |
{ |
682 |
gic_state *s = (gic_state *)opaque; |
683 |
gic_cpu_write(s, gic_get_current_cpu(s), addr, value); |
684 |
} |
685 |
|
686 |
/* Wrappers to read/write the GIC CPU interface for a specific CPU.
|
687 |
* These just decode the opaque pointer into gic_state* + cpu id.
|
688 |
*/
|
689 |
/* Read the CPU interface of a specific CPU: the opaque pointer is an
 * entry in s->backref[], whose index encodes the CPU id.  */
static uint64_t gic_do_cpu_read(void *opaque, target_phys_addr_t addr,
                                unsigned size)
{
    gic_state **entry = opaque;
    gic_state *s = *entry;
    return gic_cpu_read(s, entry - s->backref, addr);
}
697 |
|
698 |
static void gic_do_cpu_write(void *opaque, target_phys_addr_t addr, |
699 |
uint64_t value, unsigned size)
|
700 |
{ |
701 |
gic_state **backref = (gic_state **)opaque; |
702 |
gic_state *s = *backref; |
703 |
int id = (backref - s->backref);
|
704 |
gic_cpu_write(s, id, addr, value); |
705 |
} |
706 |
|
707 |
/* "CPU interface for the currently-executing core" region.  */
static const MemoryRegionOps gic_thiscpu_ops = {
    .read = gic_thiscpu_read,
    .write = gic_thiscpu_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
712 |
|
713 |
/* "CPU interface for core N" regions; opaque is &s->backref[N].  */
static const MemoryRegionOps gic_cpu_ops = {
    .read = gic_do_cpu_read,
    .write = gic_do_cpu_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
718 |
#endif
|
719 |
|
720 |
/* qdev reset handler: return the GIC to its power-on state.  */
static void gic_reset(DeviceState *dev)
{
    gic_state *s = FROM_SYSBUS(gic_state, sysbus_from_qdev(dev));
    int i;
    memset(s->irq_state, 0, GIC_MAXIRQ * sizeof(gic_irq_state));
    for (i = 0 ; i < NUM_CPU(s); i++) {
        s->priority_mask[i] = 0xf0;
        s->current_pending[i] = 1023; /* no pending interrupt */
        s->running_irq[i] = 1023;     /* no running interrupt */
        s->running_priority[i] = 0x100; /* idle priority */
        s->cpu_enabled[i] = 0;
    }
    /* SGIs (0-15) come up enabled and edge-triggered.  */
    for (i = 0; i < 16; i++) {
        GIC_SET_ENABLED(i, ALL_CPU_MASK);
        GIC_SET_TRIGGER(i);
    }
    if (s->num_cpu == 1) {
        /* For uniprocessor GICs all interrupts always target the sole CPU */
        for (i = 0; i < GIC_MAXIRQ; i++) {
            s->irq_target[i] = 1;
        }
    }
    s->enabled = 0;
}
744 |
|
745 |
/* savevm handler (version 3).  Field order must match gic_load exactly.  */
static void gic_save(QEMUFile *f, void *opaque)
{
    gic_state *s = (gic_state *)opaque;
    int i;
    int j;

    qemu_put_be32(f, s->enabled);
    for (i = 0; i < NUM_CPU(s); i++) {
        qemu_put_be32(f, s->cpu_enabled[i]);
        for (j = 0; j < GIC_INTERNAL; j++)
            qemu_put_be32(f, s->priority1[j][i]);
        for (j = 0; j < s->num_irq; j++)
            qemu_put_be32(f, s->last_active[j][i]);
        qemu_put_be32(f, s->priority_mask[i]);
        qemu_put_be32(f, s->running_irq[i]);
        qemu_put_be32(f, s->running_priority[i]);
        qemu_put_be32(f, s->current_pending[i]);
    }
    for (i = 0; i < s->num_irq - GIC_INTERNAL; i++) {
        qemu_put_be32(f, s->priority2[i]);
    }
    for (i = 0; i < s->num_irq; i++) {
        qemu_put_be32(f, s->irq_target[i]);
        qemu_put_byte(f, s->irq_state[i].enabled);
        qemu_put_byte(f, s->irq_state[i].pending);
        qemu_put_byte(f, s->irq_state[i].active);
        qemu_put_byte(f, s->irq_state[i].level);
        qemu_put_byte(f, s->irq_state[i].model);
        qemu_put_byte(f, s->irq_state[i].trigger);
    }
}
776 |
|
777 |
/* loadvm handler: mirror image of gic_save.  Returns 0 on success or
 * -EINVAL for an unsupported stream version.
 */
static int gic_load(QEMUFile *f, void *opaque, int version_id)
{
    gic_state *s = (gic_state *)opaque;
    int i;
    int j;

    if (version_id != 3) {
        return -EINVAL;
    }

    s->enabled = qemu_get_be32(f);
    for (i = 0; i < NUM_CPU(s); i++) {
        s->cpu_enabled[i] = qemu_get_be32(f);
        for (j = 0; j < GIC_INTERNAL; j++)
            s->priority1[j][i] = qemu_get_be32(f);
        for (j = 0; j < s->num_irq; j++)
            s->last_active[j][i] = qemu_get_be32(f);
        s->priority_mask[i] = qemu_get_be32(f);
        s->running_irq[i] = qemu_get_be32(f);
        s->running_priority[i] = qemu_get_be32(f);
        s->current_pending[i] = qemu_get_be32(f);
    }
    for (i = 0; i < s->num_irq - GIC_INTERNAL; i++) {
        s->priority2[i] = qemu_get_be32(f);
    }
    for (i = 0; i < s->num_irq; i++) {
        s->irq_target[i] = qemu_get_be32(f);
        s->irq_state[i].enabled = qemu_get_byte(f);
        s->irq_state[i].pending = qemu_get_byte(f);
        s->irq_state[i].active = qemu_get_byte(f);
        s->irq_state[i].level = qemu_get_byte(f);
        s->irq_state[i].model = qemu_get_byte(f);
        s->irq_state[i].trigger = qemu_get_byte(f);
    }

    return 0;
}
814 |
|
815 |
/* Common initialization shared by the GIC and (via #include) NVIC builds.
 * num_irq is the number of interrupt lines requested by the board/SoC.
 */
static void gic_init(gic_state *s, int num_irq)
{
    int i;

    if (s->num_cpu > NCPU) {
        hw_error("requested %u CPUs exceeds GIC maximum %d\n",
                 s->num_cpu, NCPU);
    }
    s->num_irq = num_irq + GIC_BASE_IRQ;
    if (s->num_irq > GIC_MAXIRQ) {
        hw_error("requested %u interrupt lines exceeds GIC maximum %d\n",
                 num_irq, GIC_MAXIRQ);
    }
    /* ITLinesNumber is represented as (N / 32) - 1 (see
     * gic_dist_readb) so this is an implementation imposed
     * restriction, not an architectural one:
     */
    if (s->num_irq < 32 || (s->num_irq % 32)) {
        hw_error("%d interrupt lines unsupported: not divisible by 32\n",
                 num_irq);
    }

    i = s->num_irq - GIC_INTERNAL;
    /* For the GIC, also expose incoming GPIO lines for PPIs for each CPU.
     * GPIO array layout is thus:
     *  [0..N-1] SPIs
     *  [N..N+31] PPIs for CPU 0
     *  [N+32..N+63] PPIs for CPU 1
     *   ...
     */
    if (s->revision != REV_NVIC) {
        i += (GIC_INTERNAL * s->num_cpu);
    }
    qdev_init_gpio_in(&s->busdev.qdev, gic_set_irq, i);
    for (i = 0; i < NUM_CPU(s); i++) {
        sysbus_init_irq(&s->busdev, &s->parent_irq[i]);
    }
    memory_region_init_io(&s->iomem, &gic_dist_ops, s, "gic_dist", 0x1000);
#ifndef NVIC
    /* Memory regions for the CPU interfaces (NVIC doesn't have these):
     * a region for "CPU interface for this core", then a region for
     * "CPU interface for core 0", "for core 1", ...
     * NB that the memory region size of 0x100 applies for the 11MPCore
     * and also cores following the GIC v1 spec (ie A9).
     * GIC v2 defines a larger memory region (0x1000) so this will need
     * to be extended when we implement A15.
     */
    memory_region_init_io(&s->cpuiomem[0], &gic_thiscpu_ops, s,
                          "gic_cpu", 0x100);
    for (i = 0; i < NUM_CPU(s); i++) {
        s->backref[i] = s;
        memory_region_init_io(&s->cpuiomem[i+1], &gic_cpu_ops, &s->backref[i],
                              "gic_cpu", 0x100);
    }
#endif

    register_savevm(NULL, "arm_gic", -1, 3, gic_save, gic_load, s);
}
873 |
|
874 |
#ifndef NVIC
|
875 |
|
876 |
static int arm_gic_init(SysBusDevice *dev)
{
    /* Device instance init function for the GIC sysbus device */
    int i;
    gic_state *s = FROM_SYSBUS(gic_state, dev);
    gic_init(s, s->num_irq);
    /* Distributor */
    sysbus_init_mmio(dev, &s->iomem);
    /* cpu interfaces (one for "current cpu" plus one per cpu) */
    for (i = 0; i <= NUM_CPU(s); i++) {
        sysbus_init_mmio(dev, &s->cpuiomem[i]);
    }
    return 0;
}
890 |
|
891 |
/* qdev properties for the standalone "arm_gic" device.  */
static Property arm_gic_properties[] = {
    DEFINE_PROP_UINT32("num-cpu", gic_state, num_cpu, 1),
    DEFINE_PROP_UINT32("num-irq", gic_state, num_irq, 32),
    /* Revision can be 1 or 2 for GIC architecture specification
     * versions 1 or 2, or 0 to indicate the legacy 11MPCore GIC.
     * (Internally, 0xffffffff also indicates "not a GIC but an NVIC".)
     */
    DEFINE_PROP_UINT32("revision", gic_state, revision, 1),
    DEFINE_PROP_END_OF_LIST(),
};
901 |
|
902 |
/* Class init: wire up init/reset/properties for the "arm_gic" type.  */
static void arm_gic_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SysBusDeviceClass *sbc = SYS_BUS_DEVICE_CLASS(klass);
    sbc->init = arm_gic_init;
    dc->props = arm_gic_properties;
    dc->reset = gic_reset;
    dc->no_user = 1; /* not user-creatable with -device */
}
911 |
|
912 |
/* QOM type registration data for the standalone GIC sysbus device.  */
static TypeInfo arm_gic_info = {
    .name = "arm_gic",
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(gic_state),
    .class_init = arm_gic_class_init,
};
918 |
|
919 |
/* Register the "arm_gic" type with QOM at module-init time.  */
static void arm_gic_register_types(void)
{
    type_register_static(&arm_gic_info);
}

type_init(arm_gic_register_types)
925 |
|
926 |
#endif
|