root / hw / arm_gic.c @ 9c22a623
History | View | Annotate | Download (21.4 kB)
1 |
/*
|
---|---|
2 |
* ARM Generic/Distributed Interrupt Controller
|
3 |
*
|
4 |
* Copyright (c) 2006-2007 CodeSourcery.
|
5 |
* Written by Paul Brook
|
6 |
*
|
7 |
* This code is licenced under the GPL.
|
8 |
*/
|
9 |
|
10 |
/* This file contains implementation code for the RealView EB interrupt
|
11 |
controller, MPCore distributed interrupt controller and ARMv7-M
|
12 |
Nested Vectored Interrupt Controller. */
|
13 |
|
14 |
//#define DEBUG_GIC

#ifdef DEBUG_GIC
/* Debug tracing: every message is prefixed with "arm_gic: ".  */
#define DPRINTF(fmt, args...) \
do { printf("arm_gic: " fmt , ##args); } while (0)
#else
/* Compiled out when DEBUG_GIC is not defined.  */
#define DPRINTF(fmt, args...) do {} while(0)
#endif
|
22 |
|
23 |
#ifdef NVIC
/* Peripheral ID bytes read back from offsets 0xfe0..0xffc (NVIC build).  */
static const uint8_t gic_id[] =
{ 0x00, 0xb0, 0x1b, 0x00, 0x0d, 0xe0, 0x05, 0xb1 };
/* The NVIC has 16 internal vectors.  However these are not exposed
   through the normal GIC interface.  */
#define GIC_BASE_IRQ 32
#else
/* Peripheral ID bytes read back from offsets 0xfe0..0xffc (GIC build).  */
static const uint8_t gic_id[] =
{ 0x90, 0x13, 0x04, 0x00, 0x0d, 0xf0, 0x05, 0xb1 };
#define GIC_BASE_IRQ 0
#endif
|
34 |
|
35 |
/* Per-interrupt state.  The pending/active/level fields are CPU bitmasks
   (bit n = CPU n).  */
typedef struct gic_irq_state
{
    /* ??? The documentation seems to imply the enable bits are global, even
       for per-cpu interrupts.  This seems strange.  */
    unsigned enabled:1;
    unsigned pending:NCPU;  /* Pending on each CPU.  */
    unsigned active:NCPU;   /* Active on each CPU.  */
    unsigned level:NCPU;    /* Raw input level as seen by each CPU.  */
    unsigned model:1; /* 0 = N:N, 1 = 1:N */
    unsigned trigger:1; /* nonzero = edge triggered.  */
} gic_irq_state;
46 |
|
47 |
#define ALL_CPU_MASK ((1 << NCPU) - 1) |
48 |
|
49 |
#define GIC_SET_ENABLED(irq) s->irq_state[irq].enabled = 1 |
50 |
#define GIC_CLEAR_ENABLED(irq) s->irq_state[irq].enabled = 0 |
51 |
#define GIC_TEST_ENABLED(irq) s->irq_state[irq].enabled
|
52 |
#define GIC_SET_PENDING(irq, cm) s->irq_state[irq].pending |= (cm)
|
53 |
#define GIC_CLEAR_PENDING(irq, cm) s->irq_state[irq].pending &= ~(cm)
|
54 |
#define GIC_TEST_PENDING(irq, cm) ((s->irq_state[irq].pending & (cm)) != 0) |
55 |
#define GIC_SET_ACTIVE(irq, cm) s->irq_state[irq].active |= (cm)
|
56 |
#define GIC_CLEAR_ACTIVE(irq, cm) s->irq_state[irq].active &= ~(cm)
|
57 |
#define GIC_TEST_ACTIVE(irq, cm) ((s->irq_state[irq].active & (cm)) != 0) |
58 |
#define GIC_SET_MODEL(irq) s->irq_state[irq].model = 1 |
59 |
#define GIC_CLEAR_MODEL(irq) s->irq_state[irq].model = 0 |
60 |
#define GIC_TEST_MODEL(irq) s->irq_state[irq].model
|
61 |
#define GIC_SET_LEVEL(irq, cm) s->irq_state[irq].level = (cm)
|
62 |
#define GIC_CLEAR_LEVEL(irq, cm) s->irq_state[irq].level &= ~(cm)
|
63 |
#define GIC_TEST_LEVEL(irq, cm) ((s->irq_state[irq].level & (cm)) != 0) |
64 |
#define GIC_SET_TRIGGER(irq) s->irq_state[irq].trigger = 1 |
65 |
#define GIC_CLEAR_TRIGGER(irq) s->irq_state[irq].trigger = 0 |
66 |
#define GIC_TEST_TRIGGER(irq) s->irq_state[irq].trigger
|
67 |
#define GIC_GET_PRIORITY(irq, cpu) \
|
68 |
(((irq) < 32) ? s->priority1[irq][cpu] : s->priority2[(irq) - 32]) |
69 |
#ifdef NVIC
|
70 |
#define GIC_TARGET(irq) 1 |
71 |
#else
|
72 |
#define GIC_TARGET(irq) s->irq_target[irq]
|
73 |
#endif
|
74 |
|
75 |
/* Complete controller state: distributor plus per-CPU interface state.  */
typedef struct gic_state
{
    qemu_irq parent_irq[NCPU]; /* IRQ output line to each CPU core.  */
    int enabled;               /* Distributor enable.  */
    int cpu_enabled[NCPU];     /* Per-CPU interface enable.  */

    gic_irq_state irq_state[GIC_NIRQ];
#ifndef NVIC
    int irq_target[GIC_NIRQ];  /* CPU mask each shared interrupt targets.  */
#endif
    int priority1[32][NCPU];   /* Banked priority for private interrupts.  */
    int priority2[GIC_NIRQ - 32]; /* Shared priority for the rest.  */
    /* Per-CPU chain (indexed by IRQ number) of interrupts preempted by
       the one currently running; 1023 terminates the chain.  */
    int last_active[GIC_NIRQ][NCPU];

    int priority_mask[NCPU];    /* CPU interface priority mask register.  */
    int running_irq[NCPU];      /* Currently running IRQ; 1023 if none.  */
    int running_priority[NCPU]; /* Priority of running IRQ; 0x100 = idle.  */
    int current_pending[NCPU];  /* Best pending IRQ; 1023 if none.  */

    qemu_irq *in;              /* External interrupt input lines.  */
#ifdef NVIC
    void *nvic;                /* Opaque handle for NVIC-specific registers.  */
#endif
} gic_state;
99 |
|
100 |
/* TODO: Many places that call this routine could be optimized. */
|
101 |
/* Update interrupt status after enabled or pending bits have been changed. */
|
102 |
static void gic_update(gic_state *s) |
103 |
{ |
104 |
int best_irq;
|
105 |
int best_prio;
|
106 |
int irq;
|
107 |
int level;
|
108 |
int cpu;
|
109 |
int cm;
|
110 |
|
111 |
for (cpu = 0; cpu < NCPU; cpu++) { |
112 |
cm = 1 << cpu;
|
113 |
s->current_pending[cpu] = 1023;
|
114 |
if (!s->enabled || !s->cpu_enabled[cpu]) {
|
115 |
qemu_irq_lower(s->parent_irq[cpu]); |
116 |
return;
|
117 |
} |
118 |
best_prio = 0x100;
|
119 |
best_irq = 1023;
|
120 |
for (irq = 0; irq < GIC_NIRQ; irq++) { |
121 |
if (GIC_TEST_ENABLED(irq) && GIC_TEST_PENDING(irq, cm)) {
|
122 |
if (GIC_GET_PRIORITY(irq, cpu) < best_prio) {
|
123 |
best_prio = GIC_GET_PRIORITY(irq, cpu); |
124 |
best_irq = irq; |
125 |
} |
126 |
} |
127 |
} |
128 |
level = 0;
|
129 |
if (best_prio <= s->priority_mask[cpu]) {
|
130 |
s->current_pending[cpu] = best_irq; |
131 |
if (best_prio < s->running_priority[cpu]) {
|
132 |
DPRINTF("Raised pending IRQ %d\n", best_irq);
|
133 |
level = 1;
|
134 |
} |
135 |
} |
136 |
qemu_set_irq(s->parent_irq[cpu], level); |
137 |
} |
138 |
} |
139 |
|
140 |
static void __attribute__((unused)) |
141 |
gic_set_pending_private(gic_state *s, int cpu, int irq) |
142 |
{ |
143 |
int cm = 1 << cpu; |
144 |
|
145 |
if (GIC_TEST_PENDING(irq, cm))
|
146 |
return;
|
147 |
|
148 |
DPRINTF("Set %d pending cpu %d\n", irq, cpu);
|
149 |
GIC_SET_PENDING(irq, cm); |
150 |
gic_update(s); |
151 |
} |
152 |
|
153 |
/* Process a change in an external IRQ input. */
|
154 |
static void gic_set_irq(void *opaque, int irq, int level) |
155 |
{ |
156 |
gic_state *s = (gic_state *)opaque; |
157 |
/* The first external input line is internal interrupt 32. */
|
158 |
irq += 32;
|
159 |
if (level == GIC_TEST_LEVEL(irq, ALL_CPU_MASK))
|
160 |
return;
|
161 |
|
162 |
if (level) {
|
163 |
GIC_SET_LEVEL(irq, ALL_CPU_MASK); |
164 |
if (GIC_TEST_TRIGGER(irq) || GIC_TEST_ENABLED(irq)) {
|
165 |
DPRINTF("Set %d pending mask %x\n", irq, GIC_TARGET(irq));
|
166 |
GIC_SET_PENDING(irq, GIC_TARGET(irq)); |
167 |
} |
168 |
} else {
|
169 |
GIC_CLEAR_LEVEL(irq, ALL_CPU_MASK); |
170 |
} |
171 |
gic_update(s); |
172 |
} |
173 |
|
174 |
/* Make IRQ the running interrupt on CPU and track its priority.
   1023 means "nothing running", which maps to the idle priority 0x100.  */
static void gic_set_running_irq(gic_state *s, int cpu, int irq)
{
    s->running_irq[cpu] = irq;
    s->running_priority[cpu] =
        (irq == 1023) ? 0x100 : GIC_GET_PRIORITY(irq, cpu);
    gic_update(s);
}
184 |
|
185 |
/* CPU interface interrupt acknowledge: take the best pending interrupt,
   remember what was running so EOI can restore it, and return the
   interrupt number (1023 if nothing can preempt the running one).  */
static uint32_t gic_acknowledge_irq(gic_state *s, int cpu)
{
    int cm = 1 << cpu;
    int irq = s->current_pending[cpu];

    if (irq == 1023
            || GIC_GET_PRIORITY(irq, cpu) >= s->running_priority[cpu]) {
        DPRINTF("ACK no pending IRQ\n");
        return 1023;
    }
    /* Push the previously running interrupt onto the preemption chain.  */
    s->last_active[irq][cpu] = s->running_irq[cpu];
    /* Clear pending flags for both level and edge triggered interrupts.
       Level triggered IRQs will be reasserted once they become inactive.  */
    GIC_CLEAR_PENDING(irq, GIC_TEST_MODEL(irq) ? ALL_CPU_MASK : cm);
    gic_set_running_irq(s, cpu, irq);
    DPRINTF("ACK %d\n", irq);
    return irq;
}
203 |
|
204 |
/* CPU interface End Of Interrupt: retire IRQ on CPU.  Handles both the
   currently running interrupt and out-of-order completion of an older
   interrupt still sitting on the last_active preemption chain.  */
static void gic_complete_irq(gic_state * s, int cpu, int irq)
{
    int update = 0;
    int cm = 1 << cpu;
    DPRINTF("EOI %d\n", irq);
    if (s->running_irq[cpu] == 1023)
        return; /* No active IRQ.  */
    if (irq != 1023) {
        /* Mark level triggered interrupts as pending if they are still
           raised.  */
        if (!GIC_TEST_TRIGGER(irq) && GIC_TEST_ENABLED(irq)
            && GIC_TEST_LEVEL(irq, cm) && (GIC_TARGET(irq) & cm) != 0) {
            DPRINTF("Set %d pending mask %x\n", irq, cm);
            GIC_SET_PENDING(irq, cm);
            update = 1;
        }
    }
    if (irq != s->running_irq[cpu]) {
        /* Complete an IRQ that is not currently running.  */
        /* Walk the preemption chain and unlink irq from it; the chain
           is terminated by the sentinel 1023.  */
        int tmp = s->running_irq[cpu];
        while (s->last_active[tmp][cpu] != 1023) {
            if (s->last_active[tmp][cpu] == irq) {
                s->last_active[tmp][cpu] = s->last_active[irq][cpu];
                break;
            }
            tmp = s->last_active[tmp][cpu];
        }
        if (update) {
            gic_update(s);
        }
    } else {
        /* Complete the current running IRQ.  */
        /* Pop back to the interrupt this one preempted (may be 1023).  */
        gic_set_running_irq(s, cpu, s->last_active[s->running_irq[cpu]][cpu]);
    }
}
239 |
|
240 |
/* Byte-wide read from the distributor register bank.  */
static uint32_t gic_dist_readb(void *opaque, target_phys_addr_t offset)
{
    gic_state *s = (gic_state *)opaque;
    uint32_t res;
    int irq;
    int i;
    int cpu;
    int cm;
    int mask;

    cpu = gic_get_current_cpu();
    cm = 1 << cpu;
    if (offset < 0x100) {
#ifndef NVIC
        if (offset == 0)
            return s->enabled;
        if (offset == 4)
            /* Type register: number of IRQ lines and number of CPUs.  */
            return ((GIC_NIRQ / 32) - 1) | ((NCPU - 1) << 5);
        if (offset < 0x08)
            return 0;
#endif
        goto bad_reg;
    } else if (offset < 0x200) {
        /* Interrupt Set/Clear Enable.  Both views read back the same
           enable bits.  */
        if (offset < 0x180)
            irq = (offset - 0x100) * 8;
        else
            irq = (offset - 0x180) * 8;
        irq += GIC_BASE_IRQ;
        if (irq >= GIC_NIRQ)
            goto bad_reg;
        res = 0;
        for (i = 0; i < 8; i++) {
            if (GIC_TEST_ENABLED(irq + i)) {
                res |= (1 << i);
            }
        }
    } else if (offset < 0x300) {
        /* Interrupt Set/Clear Pending.  */
        if (offset < 0x280)
            irq = (offset - 0x200) * 8;
        else
            irq = (offset - 0x280) * 8;
        irq += GIC_BASE_IRQ;
        if (irq >= GIC_NIRQ)
            goto bad_reg;
        res = 0;
        /* Private interrupts are banked: only this CPU's bit counts.  */
        mask = (irq < 32) ? cm : ALL_CPU_MASK;
        for (i = 0; i < 8; i++) {
            if (GIC_TEST_PENDING(irq + i, mask)) {
                res |= (1 << i);
            }
        }
    } else if (offset < 0x400) {
        /* Interrupt Active.  */
        irq = (offset - 0x300) * 8 + GIC_BASE_IRQ;
        if (irq >= GIC_NIRQ)
            goto bad_reg;
        res = 0;
        mask = (irq < 32) ? cm : ALL_CPU_MASK;
        for (i = 0; i < 8; i++) {
            if (GIC_TEST_ACTIVE(irq + i, mask)) {
                res |= (1 << i);
            }
        }
    } else if (offset < 0x800) {
        /* Interrupt Priority.  */
        irq = (offset - 0x400) + GIC_BASE_IRQ;
        if (irq >= GIC_NIRQ)
            goto bad_reg;
        res = GIC_GET_PRIORITY(irq, cpu);
#ifndef NVIC
    } else if (offset < 0xc00) {
        /* Interrupt CPU Target.  */
        irq = (offset - 0x800) + GIC_BASE_IRQ;
        if (irq >= GIC_NIRQ)
            goto bad_reg;
        if (irq >= 29 && irq <= 31) {
            res = cm;
        } else {
            res = GIC_TARGET(irq);
        }
    } else if (offset < 0xf00) {
        /* Interrupt Configuration.  Two config bits per interrupt, so
           each byte covers 4 IRQs.  Fixed: the old code multiplied by 2,
           returning the configuration of the wrong interrupts (the write
           path correctly multiplies by 4).  */
        irq = (offset - 0xc00) * 4 + GIC_BASE_IRQ;
        if (irq >= GIC_NIRQ)
            goto bad_reg;
        res = 0;
        for (i = 0; i < 4; i++) {
            if (GIC_TEST_MODEL(irq + i))
                res |= (1 << (i * 2));
            if (GIC_TEST_TRIGGER(irq + i))
                res |= (2 << (i * 2));
        }
#endif
    } else if (offset < 0xfe0) {
        goto bad_reg;
    } else /* offset >= 0xfe0 */ {
        /* Peripheral identification registers.  */
        if (offset & 3) {
            res = 0;
        } else {
            res = gic_id[(offset - 0xfe0) >> 2];
        }
    }
    return res;
bad_reg:
    cpu_abort(cpu_single_env, "gic_dist_readb: Bad offset %x\n", (int)offset);
    return 0;
}
349 |
|
350 |
/* 16-bit distributor read, composed from two byte reads (little-endian).  */
static uint32_t gic_dist_readw(void *opaque, target_phys_addr_t offset)
{
    uint32_t lo;
    uint32_t hi;

    lo = gic_dist_readb(opaque, offset);
    hi = gic_dist_readb(opaque, offset + 1);
    return lo | (hi << 8);
}
357 |
|
358 |
/* 32-bit distributor read.  On the NVIC, offsets outside the GIC-style
   window are handled by the NVIC-specific register code.  */
static uint32_t gic_dist_readl(void *opaque, target_phys_addr_t offset)
{
    uint32_t lo;
    uint32_t hi;
#ifdef NVIC
    gic_state *s = (gic_state *)opaque;
    uint32_t addr = offset;

    if (addr < 0x100 || addr > 0xd00)
        return nvic_readl(s->nvic, addr);
#endif
    lo = gic_dist_readw(opaque, offset);
    hi = gic_dist_readw(opaque, offset + 2);
    return lo | (hi << 16);
}
372 |
|
373 |
static void gic_dist_writeb(void *opaque, target_phys_addr_t offset, |
374 |
uint32_t value) |
375 |
{ |
376 |
gic_state *s = (gic_state *)opaque; |
377 |
int irq;
|
378 |
int i;
|
379 |
int cpu;
|
380 |
|
381 |
cpu = gic_get_current_cpu(); |
382 |
if (offset < 0x100) { |
383 |
#ifdef NVIC
|
384 |
goto bad_reg;
|
385 |
#else
|
386 |
if (offset == 0) { |
387 |
s->enabled = (value & 1);
|
388 |
DPRINTF("Distribution %sabled\n", s->enabled ? "En" : "Dis"); |
389 |
} else if (offset < 4) { |
390 |
/* ignored. */
|
391 |
} else {
|
392 |
goto bad_reg;
|
393 |
} |
394 |
#endif
|
395 |
} else if (offset < 0x180) { |
396 |
/* Interrupt Set Enable. */
|
397 |
irq = (offset - 0x100) * 8 + GIC_BASE_IRQ; |
398 |
if (irq >= GIC_NIRQ)
|
399 |
goto bad_reg;
|
400 |
if (irq < 16) |
401 |
value = 0xff;
|
402 |
for (i = 0; i < 8; i++) { |
403 |
if (value & (1 << i)) { |
404 |
int mask = (irq < 32) ? (1 << cpu) : GIC_TARGET(irq); |
405 |
if (!GIC_TEST_ENABLED(irq + i))
|
406 |
DPRINTF("Enabled IRQ %d\n", irq + i);
|
407 |
GIC_SET_ENABLED(irq + i); |
408 |
/* If a raised level triggered IRQ enabled then mark
|
409 |
is as pending. */
|
410 |
if (GIC_TEST_LEVEL(irq + i, mask)
|
411 |
&& !GIC_TEST_TRIGGER(irq + i)) { |
412 |
DPRINTF("Set %d pending mask %x\n", irq + i, mask);
|
413 |
GIC_SET_PENDING(irq + i, mask); |
414 |
} |
415 |
} |
416 |
} |
417 |
} else if (offset < 0x200) { |
418 |
/* Interrupt Clear Enable. */
|
419 |
irq = (offset - 0x180) * 8 + GIC_BASE_IRQ; |
420 |
if (irq >= GIC_NIRQ)
|
421 |
goto bad_reg;
|
422 |
if (irq < 16) |
423 |
value = 0;
|
424 |
for (i = 0; i < 8; i++) { |
425 |
if (value & (1 << i)) { |
426 |
if (GIC_TEST_ENABLED(irq + i))
|
427 |
DPRINTF("Disabled IRQ %d\n", irq + i);
|
428 |
GIC_CLEAR_ENABLED(irq + i); |
429 |
} |
430 |
} |
431 |
} else if (offset < 0x280) { |
432 |
/* Interrupt Set Pending. */
|
433 |
irq = (offset - 0x200) * 8 + GIC_BASE_IRQ; |
434 |
if (irq >= GIC_NIRQ)
|
435 |
goto bad_reg;
|
436 |
if (irq < 16) |
437 |
irq = 0;
|
438 |
|
439 |
for (i = 0; i < 8; i++) { |
440 |
if (value & (1 << i)) { |
441 |
GIC_SET_PENDING(irq + i, GIC_TARGET(irq)); |
442 |
} |
443 |
} |
444 |
} else if (offset < 0x300) { |
445 |
/* Interrupt Clear Pending. */
|
446 |
irq = (offset - 0x280) * 8 + GIC_BASE_IRQ; |
447 |
if (irq >= GIC_NIRQ)
|
448 |
goto bad_reg;
|
449 |
for (i = 0; i < 8; i++) { |
450 |
/* ??? This currently clears the pending bit for all CPUs, even
|
451 |
for per-CPU interrupts. It's unclear whether this is the
|
452 |
corect behavior. */
|
453 |
if (value & (1 << i)) { |
454 |
GIC_CLEAR_PENDING(irq + i, ALL_CPU_MASK); |
455 |
} |
456 |
} |
457 |
} else if (offset < 0x400) { |
458 |
/* Interrupt Active. */
|
459 |
goto bad_reg;
|
460 |
} else if (offset < 0x800) { |
461 |
/* Interrupt Priority. */
|
462 |
irq = (offset - 0x400) + GIC_BASE_IRQ;
|
463 |
if (irq >= GIC_NIRQ)
|
464 |
goto bad_reg;
|
465 |
if (irq < 32) { |
466 |
s->priority1[irq][cpu] = value; |
467 |
} else {
|
468 |
s->priority2[irq - 32] = value;
|
469 |
} |
470 |
#ifndef NVIC
|
471 |
} else if (offset < 0xc00) { |
472 |
/* Interrupt CPU Target. */
|
473 |
irq = (offset - 0x800) + GIC_BASE_IRQ;
|
474 |
if (irq >= GIC_NIRQ)
|
475 |
goto bad_reg;
|
476 |
if (irq < 29) |
477 |
value = 0;
|
478 |
else if (irq < 32) |
479 |
value = ALL_CPU_MASK; |
480 |
s->irq_target[irq] = value & ALL_CPU_MASK; |
481 |
} else if (offset < 0xf00) { |
482 |
/* Interrupt Configuration. */
|
483 |
irq = (offset - 0xc00) * 4 + GIC_BASE_IRQ; |
484 |
if (irq >= GIC_NIRQ)
|
485 |
goto bad_reg;
|
486 |
if (irq < 32) |
487 |
value |= 0xaa;
|
488 |
for (i = 0; i < 4; i++) { |
489 |
if (value & (1 << (i * 2))) { |
490 |
GIC_SET_MODEL(irq + i); |
491 |
} else {
|
492 |
GIC_CLEAR_MODEL(irq + i); |
493 |
} |
494 |
if (value & (2 << (i * 2))) { |
495 |
GIC_SET_TRIGGER(irq + i); |
496 |
} else {
|
497 |
GIC_CLEAR_TRIGGER(irq + i); |
498 |
} |
499 |
} |
500 |
#endif
|
501 |
} else {
|
502 |
/* 0xf00 is only handled for 32-bit writes. */
|
503 |
goto bad_reg;
|
504 |
} |
505 |
gic_update(s); |
506 |
return;
|
507 |
bad_reg:
|
508 |
cpu_abort(cpu_single_env, "gic_dist_writeb: Bad offset %x\n", (int)offset); |
509 |
} |
510 |
|
511 |
static void gic_dist_writew(void *opaque, target_phys_addr_t offset, |
512 |
uint32_t value) |
513 |
{ |
514 |
gic_dist_writeb(opaque, offset, value & 0xff);
|
515 |
gic_dist_writeb(opaque, offset + 1, value >> 8); |
516 |
} |
517 |
|
518 |
static void gic_dist_writel(void *opaque, target_phys_addr_t offset, |
519 |
uint32_t value) |
520 |
{ |
521 |
gic_state *s = (gic_state *)opaque; |
522 |
#ifdef NVIC
|
523 |
uint32_t addr; |
524 |
addr = offset; |
525 |
if (addr < 0x100 || (addr > 0xd00 && addr != 0xf00)) { |
526 |
nvic_writel(s->nvic, addr, value); |
527 |
return;
|
528 |
} |
529 |
#endif
|
530 |
if (offset == 0xf00) { |
531 |
int cpu;
|
532 |
int irq;
|
533 |
int mask;
|
534 |
|
535 |
cpu = gic_get_current_cpu(); |
536 |
irq = value & 0x3ff;
|
537 |
switch ((value >> 24) & 3) { |
538 |
case 0: |
539 |
mask = (value >> 16) & ALL_CPU_MASK;
|
540 |
break;
|
541 |
case 1: |
542 |
mask = 1 << cpu;
|
543 |
break;
|
544 |
case 2: |
545 |
mask = ALL_CPU_MASK ^ (1 << cpu);
|
546 |
break;
|
547 |
default:
|
548 |
DPRINTF("Bad Soft Int target filter\n");
|
549 |
mask = ALL_CPU_MASK; |
550 |
break;
|
551 |
} |
552 |
GIC_SET_PENDING(irq, mask); |
553 |
gic_update(s); |
554 |
return;
|
555 |
} |
556 |
gic_dist_writew(opaque, offset, value & 0xffff);
|
557 |
gic_dist_writew(opaque, offset + 2, value >> 16); |
558 |
} |
559 |
|
560 |
/* Distributor read callbacks indexed by log2 of the access size.  */
static CPUReadMemoryFunc *gic_dist_readfn[] = {
   gic_dist_readb,
   gic_dist_readw,
   gic_dist_readl
};
565 |
|
566 |
/* Distributor write callbacks indexed by log2 of the access size.  */
static CPUWriteMemoryFunc *gic_dist_writefn[] = {
   gic_dist_writeb,
   gic_dist_writew,
   gic_dist_writel
};
571 |
|
572 |
#ifndef NVIC
|
573 |
/* Read a CPU interface register for CPU "cpu" (GIC builds only; the
   NVIC has no per-CPU interface).  */
static uint32_t gic_cpu_read(gic_state *s, int cpu, int offset)
{
    if (offset == 0x00) {           /* Control */
        return s->cpu_enabled[cpu];
    } else if (offset == 0x04) {    /* Priority mask */
        return s->priority_mask[cpu];
    } else if (offset == 0x08) {    /* Binary Point */
        /* ??? Not implemented.  */
        return 0;
    } else if (offset == 0x0c) {    /* Acknowledge */
        return gic_acknowledge_irq(s, cpu);
    } else if (offset == 0x14) {    /* Running Priority */
        return s->running_priority[cpu];
    } else if (offset == 0x18) {    /* Highest Pending Interrupt */
        return s->current_pending[cpu];
    }
    cpu_abort(cpu_single_env, "gic_cpu_read: Bad offset %x\n",
              (int)offset);
    return 0;
}
595 |
|
596 |
/* Write a CPU interface register for CPU "cpu".  */
static void gic_cpu_write(gic_state *s, int cpu, int offset, uint32_t value)
{
    switch (offset) {
    case 0x00: /* Control */
        s->cpu_enabled[cpu] = (value & 1);
        /* Fixed: the old code tested the array "s->cpu_enabled" (always
           true, since an array decays to a non-null pointer) instead of
           this CPU's enable flag, so it always printed "Enabled".  */
        DPRINTF("CPU %sabled\n", s->cpu_enabled[cpu] ? "En" : "Dis");
        break;
    case 0x04: /* Priority mask */
        s->priority_mask[cpu] = (value & 0xff);
        break;
    case 0x08: /* Binary Point */
        /* ??? Not implemented.  */
        break;
    case 0x10: /* End Of Interrupt */
        /* gic_complete_irq runs gic_update itself.  */
        gic_complete_irq(s, cpu, value & 0x3ff);
        return;
    default:
        cpu_abort(cpu_single_env, "gic_cpu_write: Bad offset %x\n",
                  (int)offset);
        return;
    }
    gic_update(s);
}
618 |
#endif
|
619 |
|
620 |
/* Put the controller into its power-on state.  */
static void gic_reset(gic_state *s)
{
    int cpu;
    int irq;

    memset(s->irq_state, 0, GIC_NIRQ * sizeof(gic_irq_state));
    for (cpu = 0; cpu < NCPU; cpu++) {
        s->priority_mask[cpu] = 0xf0;
        s->current_pending[cpu] = 1023;
        s->running_irq[cpu] = 1023;
        s->running_priority[cpu] = 0x100;
#ifdef NVIC
        /* The NVIC doesn't have per-cpu interfaces, so enable by default.  */
        s->cpu_enabled[cpu] = 1;
#else
        s->cpu_enabled[cpu] = 0;
#endif
    }
    /* SGIs (0-15) come up enabled and edge triggered.  */
    for (irq = 0; irq < 16; irq++) {
        GIC_SET_ENABLED(irq);
        GIC_SET_TRIGGER(irq);
    }
#ifdef NVIC
    /* The NVIC is always enabled.  */
    s->enabled = 1;
#else
    s->enabled = 0;
#endif
}
647 |
|
648 |
/* Save device state for a snapshot (savevm format version 1).
   The layout here must stay in step with gic_load.  */
static void gic_save(QEMUFile *f, void *opaque)
{
    gic_state *s = (gic_state *)opaque;
    int i;
    int j;

    qemu_put_be32(f, s->enabled);
    for (i = 0; i < NCPU; i++) {
        qemu_put_be32(f, s->cpu_enabled[i]);
#ifndef NVIC
        /* NOTE(review): irq_target has GIC_NIRQ entries but only the
           first NCPU are saved here — looks like a bug, but fixing it
           would change the version 1 savevm format; needs a version
           bump coordinated with gic_load and register_savevm.  */
        qemu_put_be32(f, s->irq_target[i]);
#endif
        for (j = 0; j < 32; j++)
            qemu_put_be32(f, s->priority1[j][i]);
        for (j = 0; j < GIC_NIRQ; j++)
            qemu_put_be32(f, s->last_active[j][i]);
        qemu_put_be32(f, s->priority_mask[i]);
        qemu_put_be32(f, s->running_irq[i]);
        qemu_put_be32(f, s->running_priority[i]);
        qemu_put_be32(f, s->current_pending[i]);
    }
    for (i = 0; i < GIC_NIRQ - 32; i++) {
        qemu_put_be32(f, s->priority2[i]);
    }
    for (i = 0; i < GIC_NIRQ; i++) {
        qemu_put_byte(f, s->irq_state[i].enabled);
        qemu_put_byte(f, s->irq_state[i].pending);
        qemu_put_byte(f, s->irq_state[i].active);
        qemu_put_byte(f, s->irq_state[i].level);
        qemu_put_byte(f, s->irq_state[i].model);
        qemu_put_byte(f, s->irq_state[i].trigger);
    }
}
681 |
|
682 |
/* Restore device state from a snapshot; layout must match gic_save.
   Returns 0 on success, -EINVAL for an unsupported format version.  */
static int gic_load(QEMUFile *f, void *opaque, int version_id)
{
    gic_state *s = (gic_state *)opaque;
    int i;
    int j;

    if (version_id != 1)
        return -EINVAL;

    s->enabled = qemu_get_be32(f);
    for (i = 0; i < NCPU; i++) {
        s->cpu_enabled[i] = qemu_get_be32(f);
#ifndef NVIC
        /* NOTE(review): only the first NCPU of the GIC_NIRQ irq_target
           entries are restored (mirroring gic_save) — looks like a bug,
           but changing it would break the version 1 format.  */
        s->irq_target[i] = qemu_get_be32(f);
#endif
        for (j = 0; j < 32; j++)
            s->priority1[j][i] = qemu_get_be32(f);
        for (j = 0; j < GIC_NIRQ; j++)
            s->last_active[j][i] = qemu_get_be32(f);
        s->priority_mask[i] = qemu_get_be32(f);
        s->running_irq[i] = qemu_get_be32(f);
        s->running_priority[i] = qemu_get_be32(f);
        s->current_pending[i] = qemu_get_be32(f);
    }
    for (i = 0; i < GIC_NIRQ - 32; i++) {
        s->priority2[i] = qemu_get_be32(f);
    }
    for (i = 0; i < GIC_NIRQ; i++) {
        s->irq_state[i].enabled = qemu_get_byte(f);
        s->irq_state[i].pending = qemu_get_byte(f);
        s->irq_state[i].active = qemu_get_byte(f);
        s->irq_state[i].level = qemu_get_byte(f);
        s->irq_state[i].model = qemu_get_byte(f);
        s->irq_state[i].trigger = qemu_get_byte(f);
    }

    return 0;
}
720 |
|
721 |
/* Allocate and initialize a GIC: map the distributor at dist_base,
   wire one parent IRQ line per CPU, and register savevm handlers.
   Returns the new device state (owned by the caller).  */
static gic_state *gic_init(uint32_t dist_base, qemu_irq *parent_irq)
{
    gic_state *s;
    int mmio_index;
    int cpu;

    s = (gic_state *)qemu_mallocz(sizeof(gic_state));
    for (cpu = 0; cpu < NCPU; cpu++) {
        s->parent_irq[cpu] = parent_irq[cpu];
    }
    s->in = qemu_allocate_irqs(gic_set_irq, s, GIC_NIRQ);
    mmio_index = cpu_register_io_memory(0, gic_dist_readfn,
                                        gic_dist_writefn, s);
    cpu_register_physical_memory(dist_base, 0x00001000, mmio_index);
    gic_reset(s);
    register_savevm("arm_gic", -1, 1, gic_save, gic_load, s);
    return s;
}