root / hw / arm_gic.c @ 795928f6
History | View | Annotate | Download (21.7 kB)
1 |
/*
|
---|---|
2 |
* ARM Generic/Distributed Interrupt Controller
|
3 |
*
|
4 |
* Copyright (c) 2006-2007 CodeSourcery.
|
5 |
* Written by Paul Brook
|
6 |
*
|
7 |
* This code is licensed under the GPL.
|
8 |
*/
|
9 |
|
10 |
/* This file contains implementation code for the RealView EB interrupt
|
11 |
controller, MPCore distributed interrupt controller and ARMv7-M
|
12 |
Nested Vectored Interrupt Controller. */
|
13 |
|
14 |
/* Define DEBUG_GIC to get register-access tracing on stdout via DPRINTF.  */
//#define DEBUG_GIC

#ifdef DEBUG_GIC
#define DPRINTF(fmt, ...) \
do { printf("arm_gic: " fmt , ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) do {} while(0)
#endif

#ifdef NVIC
/* Peripheral ID bytes returned for distributor reads at 0xfe0 and up
   (see gic_dist_readb).  */
static const uint8_t gic_id[] =
{ 0x00, 0xb0, 0x1b, 0x00, 0x0d, 0xe0, 0x05, 0xb1 };
/* The NVIC has 16 internal vectors.  However these are not exposed
   through the normal GIC interface.  */
#define GIC_BASE_IRQ 32
#else
static const uint8_t gic_id[] =
{ 0x90, 0x13, 0x04, 0x00, 0x0d, 0xf0, 0x05, 0xb1 };
#define GIC_BASE_IRQ 0
#endif

/* Recover the container device struct from its embedded "gic" member.  */
#define FROM_SYSBUSGIC(type, dev) \
    DO_UPCAST(type, gic, FROM_SYSBUS(gic_state, dev))
37 |
|
38 |
/* Per-interrupt state.  The first four fields are bit masks with one
   bit per CPU (see the GIC_* accessor macros below).  */
typedef struct gic_irq_state
{
    /* The enable bits are only banked for per-cpu interrupts.  */
    unsigned enabled:NCPU;
    unsigned pending:NCPU;
    unsigned active:NCPU;
    unsigned level:NCPU;   /* current state of the external input line */
    unsigned model:1; /* 0 = N:N, 1 = 1:N */
    unsigned trigger:1; /* nonzero = edge triggered.  */
} gic_irq_state;
48 |
|
49 |
#define ALL_CPU_MASK ((1 << NCPU) - 1)
#if NCPU > 1
#define NUM_CPU(s) ((s)->num_cpu)
#else
#define NUM_CPU(s) 1
#endif

/* Accessors for per-interrupt state.  All of them expect a variable
   "gic_state *s" to be in scope; "cm" is a bit mask of CPUs for state
   that is banked per CPU.  */
#define GIC_SET_ENABLED(irq, cm) s->irq_state[irq].enabled |= (cm)
#define GIC_CLEAR_ENABLED(irq, cm) s->irq_state[irq].enabled &= ~(cm)
#define GIC_TEST_ENABLED(irq, cm) ((s->irq_state[irq].enabled & (cm)) != 0)
#define GIC_SET_PENDING(irq, cm) s->irq_state[irq].pending |= (cm)
#define GIC_CLEAR_PENDING(irq, cm) s->irq_state[irq].pending &= ~(cm)
#define GIC_TEST_PENDING(irq, cm) ((s->irq_state[irq].pending & (cm)) != 0)
#define GIC_SET_ACTIVE(irq, cm) s->irq_state[irq].active |= (cm)
#define GIC_CLEAR_ACTIVE(irq, cm) s->irq_state[irq].active &= ~(cm)
#define GIC_TEST_ACTIVE(irq, cm) ((s->irq_state[irq].active & (cm)) != 0)
#define GIC_SET_MODEL(irq) s->irq_state[irq].model = 1
#define GIC_CLEAR_MODEL(irq) s->irq_state[irq].model = 0
#define GIC_TEST_MODEL(irq) s->irq_state[irq].model
/* NOTE(review): SET_LEVEL assigns (=) rather than ORs (|=) the mask.
   The only caller (gic_set_irq) passes ALL_CPU_MASK, so the result is
   currently the same — confirm before adding per-CPU callers.  */
#define GIC_SET_LEVEL(irq, cm) s->irq_state[irq].level = (cm)
#define GIC_CLEAR_LEVEL(irq, cm) s->irq_state[irq].level &= ~(cm)
#define GIC_TEST_LEVEL(irq, cm) ((s->irq_state[irq].level & (cm)) != 0)
#define GIC_SET_TRIGGER(irq) s->irq_state[irq].trigger = 1
#define GIC_CLEAR_TRIGGER(irq) s->irq_state[irq].trigger = 0
#define GIC_TEST_TRIGGER(irq) s->irq_state[irq].trigger
/* Priorities for IRQs below 32 are banked per CPU; the rest are shared.  */
#define GIC_GET_PRIORITY(irq, cpu) \
    (((irq) < 32) ? s->priority1[irq][cpu] : s->priority2[(irq) - 32])
#ifdef NVIC
#define GIC_TARGET(irq) 1
#else
#define GIC_TARGET(irq) s->irq_target[irq]
#endif
|
81 |
|
82 |
typedef struct gic_state
{
    SysBusDevice busdev;
    /* Output interrupt line to each CPU core.  */
    qemu_irq parent_irq[NCPU];
    int enabled;                    /* distributor enable bit */
    int cpu_enabled[NCPU];          /* per-CPU interface enable bit */

    gic_irq_state irq_state[GIC_NIRQ];
#ifndef NVIC
    /* Target CPU mask for each interrupt (the NVIC has no routing).  */
    int irq_target[GIC_NIRQ];
#endif
    int priority1[32][NCPU];        /* banked priorities, IRQs 0..31 */
    int priority2[GIC_NIRQ - 32];   /* shared priorities, IRQs >= 32 */
    /* Per-CPU chain of preempted interrupts; 1023 terminates the chain
       (see gic_acknowledge_irq / gic_complete_irq).  */
    int last_active[GIC_NIRQ][NCPU];

    int priority_mask[NCPU];
    int running_irq[NCPU];          /* 1023 == no interrupt active */
    int running_priority[NCPU];     /* 0x100 == idle priority */
    int current_pending[NCPU];      /* highest-priority pending, or 1023 */

#if NCPU > 1
    int num_cpu;
#endif

    MemoryRegion iomem;             /* distributor ("gic_dist") region */
} gic_state;
108 |
|
109 |
/* TODO: Many places that call this routine could be optimized. */
|
110 |
/* Update interrupt status after enabled or pending bits have been changed. */
|
111 |
static void gic_update(gic_state *s) |
112 |
{ |
113 |
int best_irq;
|
114 |
int best_prio;
|
115 |
int irq;
|
116 |
int level;
|
117 |
int cpu;
|
118 |
int cm;
|
119 |
|
120 |
for (cpu = 0; cpu < NUM_CPU(s); cpu++) { |
121 |
cm = 1 << cpu;
|
122 |
s->current_pending[cpu] = 1023;
|
123 |
if (!s->enabled || !s->cpu_enabled[cpu]) {
|
124 |
qemu_irq_lower(s->parent_irq[cpu]); |
125 |
return;
|
126 |
} |
127 |
best_prio = 0x100;
|
128 |
best_irq = 1023;
|
129 |
for (irq = 0; irq < GIC_NIRQ; irq++) { |
130 |
if (GIC_TEST_ENABLED(irq, cm) && GIC_TEST_PENDING(irq, cm)) {
|
131 |
if (GIC_GET_PRIORITY(irq, cpu) < best_prio) {
|
132 |
best_prio = GIC_GET_PRIORITY(irq, cpu); |
133 |
best_irq = irq; |
134 |
} |
135 |
} |
136 |
} |
137 |
level = 0;
|
138 |
if (best_prio <= s->priority_mask[cpu]) {
|
139 |
s->current_pending[cpu] = best_irq; |
140 |
if (best_prio < s->running_priority[cpu]) {
|
141 |
DPRINTF("Raised pending IRQ %d\n", best_irq);
|
142 |
level = 1;
|
143 |
} |
144 |
} |
145 |
qemu_set_irq(s->parent_irq[cpu], level); |
146 |
} |
147 |
} |
148 |
|
149 |
static void __attribute__((unused)) |
150 |
gic_set_pending_private(gic_state *s, int cpu, int irq) |
151 |
{ |
152 |
int cm = 1 << cpu; |
153 |
|
154 |
if (GIC_TEST_PENDING(irq, cm))
|
155 |
return;
|
156 |
|
157 |
DPRINTF("Set %d pending cpu %d\n", irq, cpu);
|
158 |
GIC_SET_PENDING(irq, cm); |
159 |
gic_update(s); |
160 |
} |
161 |
|
162 |
/* Process a change in an external IRQ input. */
|
163 |
static void gic_set_irq(void *opaque, int irq, int level) |
164 |
{ |
165 |
gic_state *s = (gic_state *)opaque; |
166 |
/* The first external input line is internal interrupt 32. */
|
167 |
irq += 32;
|
168 |
if (level == GIC_TEST_LEVEL(irq, ALL_CPU_MASK))
|
169 |
return;
|
170 |
|
171 |
if (level) {
|
172 |
GIC_SET_LEVEL(irq, ALL_CPU_MASK); |
173 |
if (GIC_TEST_TRIGGER(irq) || GIC_TEST_ENABLED(irq, ALL_CPU_MASK)) {
|
174 |
DPRINTF("Set %d pending mask %x\n", irq, GIC_TARGET(irq));
|
175 |
GIC_SET_PENDING(irq, GIC_TARGET(irq)); |
176 |
} |
177 |
} else {
|
178 |
GIC_CLEAR_LEVEL(irq, ALL_CPU_MASK); |
179 |
} |
180 |
gic_update(s); |
181 |
} |
182 |
|
183 |
/* Record "irq" as the interrupt currently being serviced by "cpu" and
   recompute the running priority (0x100 means idle / none running).  */
static void gic_set_running_irq(gic_state *s, int cpu, int irq)
{
    s->running_irq[cpu] = irq;
    s->running_priority[cpu] =
        (irq == 1023) ? 0x100 : GIC_GET_PRIORITY(irq, cpu);
    gic_update(s);
}
193 |
|
194 |
/* CPU "cpu" reads its Interrupt Acknowledge register: return the
   highest-priority pending interrupt (or 1023 if none would preempt)
   and make it the running interrupt.  */
static uint32_t gic_acknowledge_irq(gic_state *s, int cpu)
{
    int cm = 1 << cpu;
    int irq = s->current_pending[cpu];

    if (irq == 1023
            || GIC_GET_PRIORITY(irq, cpu) >= s->running_priority[cpu]) {
        DPRINTF("ACK no pending IRQ\n");
        return 1023;
    }
    /* Remember which interrupt we are preempting.  */
    s->last_active[irq][cpu] = s->running_irq[cpu];
    /* Clear pending flags for both level and edge triggered interrupts.
       Level triggered IRQs will be reasserted once they become inactive.  */
    GIC_CLEAR_PENDING(irq, GIC_TEST_MODEL(irq) ? ALL_CPU_MASK : cm);
    gic_set_running_irq(s, cpu, irq);
    DPRINTF("ACK %d\n", irq);
    return irq;
}
212 |
|
213 |
/* CPU "cpu" writes "irq" to its End Of Interrupt register.  Re-pends a
   still-raised level-triggered interrupt and unwinds the per-CPU
   preemption chain kept in last_active[] (1023 terminates the chain).  */
static void gic_complete_irq(gic_state * s, int cpu, int irq)
{
    int update = 0;
    int cm = 1 << cpu;
    DPRINTF("EOI %d\n", irq);
    if (s->running_irq[cpu] == 1023)
        return; /* No active IRQ.  */
    if (irq != 1023) {
        /* Mark level triggered interrupts as pending if they are still
           raised.  */
        if (!GIC_TEST_TRIGGER(irq) && GIC_TEST_ENABLED(irq, cm)
            && GIC_TEST_LEVEL(irq, cm) && (GIC_TARGET(irq) & cm) != 0) {
            DPRINTF("Set %d pending mask %x\n", irq, cm);
            GIC_SET_PENDING(irq, cm);
            update = 1;
        }
    }
    if (irq != s->running_irq[cpu]) {
        /* Complete an IRQ that is not currently running.  */
        /* Walk the preemption chain starting at the running interrupt
           and unlink "irq" from it, if present.  */
        int tmp = s->running_irq[cpu];
        while (s->last_active[tmp][cpu] != 1023) {
            if (s->last_active[tmp][cpu] == irq) {
                s->last_active[tmp][cpu] = s->last_active[irq][cpu];
                break;
            }
            tmp = s->last_active[tmp][cpu];
        }
        if (update) {
            gic_update(s);
        }
    } else {
        /* Complete the current running IRQ.  */
        /* Resume whatever interrupt this one preempted (or go idle if
           the chain entry is 1023).  */
        gic_set_running_irq(s, cpu, s->last_active[s->running_irq[cpu]][cpu]);
    }
}
248 |
|
249 |
/* Byte read from the distributor register region.  */
static uint32_t gic_dist_readb(void *opaque, target_phys_addr_t offset)
{
    gic_state *s = (gic_state *)opaque;
    uint32_t res;
    int irq;
    int i;
    int cpu;
    int cm;
    int mask;

    cpu = gic_get_current_cpu();
    cm = 1 << cpu;
    if (offset < 0x100) {
#ifndef NVIC
        if (offset == 0)
            return s->enabled;
        if (offset == 4)
            return ((GIC_NIRQ / 32) - 1) | ((NUM_CPU(s) - 1) << 5);
        if (offset < 0x08)
            return 0;
#endif
        goto bad_reg;
    } else if (offset < 0x200) {
        /* Interrupt Set/Clear Enable.  */
        if (offset < 0x180)
            irq = (offset - 0x100) * 8;
        else
            irq = (offset - 0x180) * 8;
        irq += GIC_BASE_IRQ;
        if (irq >= GIC_NIRQ)
            goto bad_reg;
        res = 0;
        for (i = 0; i < 8; i++) {
            if (GIC_TEST_ENABLED(irq + i, cm)) {
                res |= (1 << i);
            }
        }
    } else if (offset < 0x300) {
        /* Interrupt Set/Clear Pending.  */
        if (offset < 0x280)
            irq = (offset - 0x200) * 8;
        else
            irq = (offset - 0x280) * 8;
        irq += GIC_BASE_IRQ;
        if (irq >= GIC_NIRQ)
            goto bad_reg;
        res = 0;
        mask = (irq < 32) ? cm : ALL_CPU_MASK;
        for (i = 0; i < 8; i++) {
            if (GIC_TEST_PENDING(irq + i, mask)) {
                res |= (1 << i);
            }
        }
    } else if (offset < 0x400) {
        /* Interrupt Active.  */
        irq = (offset - 0x300) * 8 + GIC_BASE_IRQ;
        if (irq >= GIC_NIRQ)
            goto bad_reg;
        res = 0;
        mask = (irq < 32) ? cm : ALL_CPU_MASK;
        for (i = 0; i < 8; i++) {
            if (GIC_TEST_ACTIVE(irq + i, mask)) {
                res |= (1 << i);
            }
        }
    } else if (offset < 0x800) {
        /* Interrupt Priority.  */
        irq = (offset - 0x400) + GIC_BASE_IRQ;
        if (irq >= GIC_NIRQ)
            goto bad_reg;
        res = GIC_GET_PRIORITY(irq, cpu);
#ifndef NVIC
    } else if (offset < 0xc00) {
        /* Interrupt CPU Target.  */
        irq = (offset - 0x800) + GIC_BASE_IRQ;
        if (irq >= GIC_NIRQ)
            goto bad_reg;
        if (irq >= 29 && irq <= 31) {
            res = cm;
        } else {
            res = GIC_TARGET(irq);
        }
    } else if (offset < 0xf00) {
        /* Interrupt Configuration: each byte covers four interrupts at
           two bits each.  Fix: scale the offset by 4, not 2, matching
           gic_dist_writeb; the old "* 2" made reads return the config
           of the wrong interrupts for offsets above 0xc00.  */
        irq = (offset - 0xc00) * 4 + GIC_BASE_IRQ;
        if (irq >= GIC_NIRQ)
            goto bad_reg;
        res = 0;
        for (i = 0; i < 4; i++) {
            if (GIC_TEST_MODEL(irq + i))
                res |= (1 << (i * 2));
            if (GIC_TEST_TRIGGER(irq + i))
                res |= (2 << (i * 2));
        }
#endif
    } else if (offset < 0xfe0) {
        goto bad_reg;
    } else /* offset >= 0xfe0 */ {
        /* Identification registers; byte-aligned reads of the ID bytes,
           zero for the padding bytes in between.  */
        if (offset & 3) {
            res = 0;
        } else {
            res = gic_id[(offset - 0xfe0) >> 2];
        }
    }
    return res;
bad_reg:
    hw_error("gic_dist_readb: Bad offset %x\n", (int)offset);
    return 0;
}
358 |
|
359 |
/* 16-bit distributor read, assembled from two byte reads (little end
   first).  */
static uint32_t gic_dist_readw(void *opaque, target_phys_addr_t offset)
{
    uint32_t lo = gic_dist_readb(opaque, offset);
    uint32_t hi = gic_dist_readb(opaque, offset + 1);

    return lo | (hi << 8);
}
366 |
|
367 |
/* 32-bit distributor read.  For the NVIC, offsets outside the shared
   0x100..0xd00 window are NVIC-specific registers.  */
static uint32_t gic_dist_readl(void *opaque, target_phys_addr_t offset)
{
    uint32_t lo, hi;
#ifdef NVIC
    gic_state *s = (gic_state *)opaque;

    if (offset < 0x100 || offset > 0xd00) {
        return nvic_readl(s, offset);
    }
#endif
    lo = gic_dist_readw(opaque, offset);
    hi = gic_dist_readw(opaque, offset + 2);
    return lo | (hi << 16);
}
381 |
|
382 |
static void gic_dist_writeb(void *opaque, target_phys_addr_t offset, |
383 |
uint32_t value) |
384 |
{ |
385 |
gic_state *s = (gic_state *)opaque; |
386 |
int irq;
|
387 |
int i;
|
388 |
int cpu;
|
389 |
|
390 |
cpu = gic_get_current_cpu(); |
391 |
if (offset < 0x100) { |
392 |
#ifdef NVIC
|
393 |
goto bad_reg;
|
394 |
#else
|
395 |
if (offset == 0) { |
396 |
s->enabled = (value & 1);
|
397 |
DPRINTF("Distribution %sabled\n", s->enabled ? "En" : "Dis"); |
398 |
} else if (offset < 4) { |
399 |
/* ignored. */
|
400 |
} else {
|
401 |
goto bad_reg;
|
402 |
} |
403 |
#endif
|
404 |
} else if (offset < 0x180) { |
405 |
/* Interrupt Set Enable. */
|
406 |
irq = (offset - 0x100) * 8 + GIC_BASE_IRQ; |
407 |
if (irq >= GIC_NIRQ)
|
408 |
goto bad_reg;
|
409 |
if (irq < 16) |
410 |
value = 0xff;
|
411 |
for (i = 0; i < 8; i++) { |
412 |
if (value & (1 << i)) { |
413 |
int mask = (irq < 32) ? (1 << cpu) : GIC_TARGET(irq); |
414 |
int cm = (irq < 32) ? (1 << cpu) : ALL_CPU_MASK; |
415 |
|
416 |
if (!GIC_TEST_ENABLED(irq + i, cm)) {
|
417 |
DPRINTF("Enabled IRQ %d\n", irq + i);
|
418 |
} |
419 |
GIC_SET_ENABLED(irq + i, cm); |
420 |
/* If a raised level triggered IRQ enabled then mark
|
421 |
is as pending. */
|
422 |
if (GIC_TEST_LEVEL(irq + i, mask)
|
423 |
&& !GIC_TEST_TRIGGER(irq + i)) { |
424 |
DPRINTF("Set %d pending mask %x\n", irq + i, mask);
|
425 |
GIC_SET_PENDING(irq + i, mask); |
426 |
} |
427 |
} |
428 |
} |
429 |
} else if (offset < 0x200) { |
430 |
/* Interrupt Clear Enable. */
|
431 |
irq = (offset - 0x180) * 8 + GIC_BASE_IRQ; |
432 |
if (irq >= GIC_NIRQ)
|
433 |
goto bad_reg;
|
434 |
if (irq < 16) |
435 |
value = 0;
|
436 |
for (i = 0; i < 8; i++) { |
437 |
if (value & (1 << i)) { |
438 |
int cm = (irq < 32) ? (1 << cpu) : ALL_CPU_MASK; |
439 |
|
440 |
if (GIC_TEST_ENABLED(irq + i, cm)) {
|
441 |
DPRINTF("Disabled IRQ %d\n", irq + i);
|
442 |
} |
443 |
GIC_CLEAR_ENABLED(irq + i, cm); |
444 |
} |
445 |
} |
446 |
} else if (offset < 0x280) { |
447 |
/* Interrupt Set Pending. */
|
448 |
irq = (offset - 0x200) * 8 + GIC_BASE_IRQ; |
449 |
if (irq >= GIC_NIRQ)
|
450 |
goto bad_reg;
|
451 |
if (irq < 16) |
452 |
irq = 0;
|
453 |
|
454 |
for (i = 0; i < 8; i++) { |
455 |
if (value & (1 << i)) { |
456 |
GIC_SET_PENDING(irq + i, GIC_TARGET(irq)); |
457 |
} |
458 |
} |
459 |
} else if (offset < 0x300) { |
460 |
/* Interrupt Clear Pending. */
|
461 |
irq = (offset - 0x280) * 8 + GIC_BASE_IRQ; |
462 |
if (irq >= GIC_NIRQ)
|
463 |
goto bad_reg;
|
464 |
for (i = 0; i < 8; i++) { |
465 |
/* ??? This currently clears the pending bit for all CPUs, even
|
466 |
for per-CPU interrupts. It's unclear whether this is the
|
467 |
corect behavior. */
|
468 |
if (value & (1 << i)) { |
469 |
GIC_CLEAR_PENDING(irq + i, ALL_CPU_MASK); |
470 |
} |
471 |
} |
472 |
} else if (offset < 0x400) { |
473 |
/* Interrupt Active. */
|
474 |
goto bad_reg;
|
475 |
} else if (offset < 0x800) { |
476 |
/* Interrupt Priority. */
|
477 |
irq = (offset - 0x400) + GIC_BASE_IRQ;
|
478 |
if (irq >= GIC_NIRQ)
|
479 |
goto bad_reg;
|
480 |
if (irq < 32) { |
481 |
s->priority1[irq][cpu] = value; |
482 |
} else {
|
483 |
s->priority2[irq - 32] = value;
|
484 |
} |
485 |
#ifndef NVIC
|
486 |
} else if (offset < 0xc00) { |
487 |
/* Interrupt CPU Target. */
|
488 |
irq = (offset - 0x800) + GIC_BASE_IRQ;
|
489 |
if (irq >= GIC_NIRQ)
|
490 |
goto bad_reg;
|
491 |
if (irq < 29) |
492 |
value = 0;
|
493 |
else if (irq < 32) |
494 |
value = ALL_CPU_MASK; |
495 |
s->irq_target[irq] = value & ALL_CPU_MASK; |
496 |
} else if (offset < 0xf00) { |
497 |
/* Interrupt Configuration. */
|
498 |
irq = (offset - 0xc00) * 4 + GIC_BASE_IRQ; |
499 |
if (irq >= GIC_NIRQ)
|
500 |
goto bad_reg;
|
501 |
if (irq < 32) |
502 |
value |= 0xaa;
|
503 |
for (i = 0; i < 4; i++) { |
504 |
if (value & (1 << (i * 2))) { |
505 |
GIC_SET_MODEL(irq + i); |
506 |
} else {
|
507 |
GIC_CLEAR_MODEL(irq + i); |
508 |
} |
509 |
if (value & (2 << (i * 2))) { |
510 |
GIC_SET_TRIGGER(irq + i); |
511 |
} else {
|
512 |
GIC_CLEAR_TRIGGER(irq + i); |
513 |
} |
514 |
} |
515 |
#endif
|
516 |
} else {
|
517 |
/* 0xf00 is only handled for 32-bit writes. */
|
518 |
goto bad_reg;
|
519 |
} |
520 |
gic_update(s); |
521 |
return;
|
522 |
bad_reg:
|
523 |
hw_error("gic_dist_writeb: Bad offset %x\n", (int)offset); |
524 |
} |
525 |
|
526 |
static void gic_dist_writew(void *opaque, target_phys_addr_t offset, |
527 |
uint32_t value) |
528 |
{ |
529 |
gic_dist_writeb(opaque, offset, value & 0xff);
|
530 |
gic_dist_writeb(opaque, offset + 1, value >> 8); |
531 |
} |
532 |
|
533 |
static void gic_dist_writel(void *opaque, target_phys_addr_t offset, |
534 |
uint32_t value) |
535 |
{ |
536 |
gic_state *s = (gic_state *)opaque; |
537 |
#ifdef NVIC
|
538 |
uint32_t addr; |
539 |
addr = offset; |
540 |
if (addr < 0x100 || (addr > 0xd00 && addr != 0xf00)) { |
541 |
nvic_writel(s, addr, value); |
542 |
return;
|
543 |
} |
544 |
#endif
|
545 |
if (offset == 0xf00) { |
546 |
int cpu;
|
547 |
int irq;
|
548 |
int mask;
|
549 |
|
550 |
cpu = gic_get_current_cpu(); |
551 |
irq = value & 0x3ff;
|
552 |
switch ((value >> 24) & 3) { |
553 |
case 0: |
554 |
mask = (value >> 16) & ALL_CPU_MASK;
|
555 |
break;
|
556 |
case 1: |
557 |
mask = ALL_CPU_MASK ^ (1 << cpu);
|
558 |
break;
|
559 |
case 2: |
560 |
mask = 1 << cpu;
|
561 |
break;
|
562 |
default:
|
563 |
DPRINTF("Bad Soft Int target filter\n");
|
564 |
mask = ALL_CPU_MASK; |
565 |
break;
|
566 |
} |
567 |
GIC_SET_PENDING(irq, mask); |
568 |
gic_update(s); |
569 |
return;
|
570 |
} |
571 |
gic_dist_writew(opaque, offset, value & 0xffff);
|
572 |
gic_dist_writew(opaque, offset + 2, value >> 16); |
573 |
} |
574 |
|
575 |
/* Distributor MMIO region: per-access-size callbacks dispatched through
   the legacy old_mmio interface.  */
static const MemoryRegionOps gic_dist_ops = {
    .old_mmio = {
        .read = { gic_dist_readb, gic_dist_readw, gic_dist_readl, },
        .write = { gic_dist_writeb, gic_dist_writew, gic_dist_writel, },
    },
    .endianness = DEVICE_NATIVE_ENDIAN,
};
582 |
|
583 |
#ifndef NVIC
|
584 |
/* Read from one CPU's GIC CPU-interface registers.  */
static uint32_t gic_cpu_read(gic_state *s, int cpu, int offset)
{
    if (offset == 0x00) {               /* Control */
        return s->cpu_enabled[cpu];
    }
    if (offset == 0x04) {               /* Priority mask */
        return s->priority_mask[cpu];
    }
    if (offset == 0x08) {               /* Binary Point */
        /* ??? Not implemented.  */
        return 0;
    }
    if (offset == 0x0c) {               /* Acknowledge */
        return gic_acknowledge_irq(s, cpu);
    }
    if (offset == 0x14) {               /* Running Priority */
        return s->running_priority[cpu];
    }
    if (offset == 0x18) {               /* Highest Pending Interrupt */
        return s->current_pending[cpu];
    }
    hw_error("gic_cpu_read: Bad offset %x\n", (int)offset);
    return 0;
}
605 |
|
606 |
/* Write to one CPU's GIC CPU-interface registers.  */
static void gic_cpu_write(gic_state *s, int cpu, int offset, uint32_t value)
{
    switch (offset) {
    case 0x00: /* Control */
        s->cpu_enabled[cpu] = (value & 1);
        /* Fix: report this CPU's enable bit.  The old code tested
           s->cpu_enabled (the array, which decays to an always-nonzero
           pointer), so it always printed "Enabled".  */
        DPRINTF("CPU %d %sabled\n", cpu, s->cpu_enabled[cpu] ? "En" : "Dis");
        break;
    case 0x04: /* Priority mask */
        s->priority_mask[cpu] = (value & 0xff);
        break;
    case 0x08: /* Binary Point */
        /* ??? Not implemented.  */
        break;
    case 0x10: /* End Of Interrupt */
        return gic_complete_irq(s, cpu, value & 0x3ff);
    default:
        hw_error("gic_cpu_write: Bad offset %x\n", (int)offset);
        return;
    }
    gic_update(s);
}
627 |
#endif
|
628 |
|
629 |
/* Reset the GIC to its power-on state.  */
static void gic_reset(gic_state *s)
{
    int cpu, irq;

    memset(s->irq_state, 0, GIC_NIRQ * sizeof(gic_irq_state));

    for (cpu = 0; cpu < NUM_CPU(s); cpu++) {
        s->current_pending[cpu] = 1023;
        s->running_irq[cpu] = 1023;
        s->running_priority[cpu] = 0x100;
        s->priority_mask[cpu] = 0xf0;
#ifdef NVIC
        /* The NVIC doesn't have per-cpu interfaces, so enable by default.  */
        s->cpu_enabled[cpu] = 1;
#else
        s->cpu_enabled[cpu] = 0;
#endif
    }

    /* SGIs (interrupts 0-15) come up enabled and edge triggered.  */
    for (irq = 0; irq < 16; irq++) {
        GIC_SET_ENABLED(irq, ALL_CPU_MASK);
        GIC_SET_TRIGGER(irq);
    }
#ifdef NVIC
    /* The NVIC is always enabled.  */
    s->enabled = 1;
#else
    s->enabled = 0;
#endif
}
656 |
|
657 |
/* Serialize GIC state for savevm (version 2 layout; must mirror
   gic_load exactly).  */
static void gic_save(QEMUFile *f, void *opaque)
{
    gic_state *s = (gic_state *)opaque;
    int cpu, n;

    qemu_put_be32(f, s->enabled);
    for (cpu = 0; cpu < NUM_CPU(s); cpu++) {
        qemu_put_be32(f, s->cpu_enabled[cpu]);
        for (n = 0; n < 32; n++) {
            qemu_put_be32(f, s->priority1[n][cpu]);
        }
        for (n = 0; n < GIC_NIRQ; n++) {
            qemu_put_be32(f, s->last_active[n][cpu]);
        }
        qemu_put_be32(f, s->priority_mask[cpu]);
        qemu_put_be32(f, s->running_irq[cpu]);
        qemu_put_be32(f, s->running_priority[cpu]);
        qemu_put_be32(f, s->current_pending[cpu]);
    }
    for (n = 0; n < GIC_NIRQ - 32; n++) {
        qemu_put_be32(f, s->priority2[n]);
    }
    for (n = 0; n < GIC_NIRQ; n++) {
#ifndef NVIC
        qemu_put_be32(f, s->irq_target[n]);
#endif
        qemu_put_byte(f, s->irq_state[n].enabled);
        qemu_put_byte(f, s->irq_state[n].pending);
        qemu_put_byte(f, s->irq_state[n].active);
        qemu_put_byte(f, s->irq_state[n].level);
        qemu_put_byte(f, s->irq_state[n].model);
        qemu_put_byte(f, s->irq_state[n].trigger);
    }
}
690 |
|
691 |
/* Restore GIC state saved by gic_save.  Only version 2 is accepted;
   returns 0 on success, -EINVAL on version mismatch.  */
static int gic_load(QEMUFile *f, void *opaque, int version_id)
{
    gic_state *s = (gic_state *)opaque;
    int cpu, n;

    if (version_id != 2) {
        return -EINVAL;
    }

    s->enabled = qemu_get_be32(f);
    for (cpu = 0; cpu < NUM_CPU(s); cpu++) {
        s->cpu_enabled[cpu] = qemu_get_be32(f);
        for (n = 0; n < 32; n++) {
            s->priority1[n][cpu] = qemu_get_be32(f);
        }
        for (n = 0; n < GIC_NIRQ; n++) {
            s->last_active[n][cpu] = qemu_get_be32(f);
        }
        s->priority_mask[cpu] = qemu_get_be32(f);
        s->running_irq[cpu] = qemu_get_be32(f);
        s->running_priority[cpu] = qemu_get_be32(f);
        s->current_pending[cpu] = qemu_get_be32(f);
    }
    for (n = 0; n < GIC_NIRQ - 32; n++) {
        s->priority2[n] = qemu_get_be32(f);
    }
    for (n = 0; n < GIC_NIRQ; n++) {
#ifndef NVIC
        s->irq_target[n] = qemu_get_be32(f);
#endif
        s->irq_state[n].enabled = qemu_get_byte(f);
        s->irq_state[n].pending = qemu_get_byte(f);
        s->irq_state[n].active = qemu_get_byte(f);
        s->irq_state[n].level = qemu_get_byte(f);
        s->irq_state[n].model = qemu_get_byte(f);
        s->irq_state[n].trigger = qemu_get_byte(f);
    }

    return 0;
}
729 |
|
730 |
/* Common GIC initialization: wire up the GPIO inputs for the external
   interrupt lines, the per-CPU output IRQs and the distributor MMIO
   region, then reset and register the savevm handlers (version 2).  */
#if NCPU > 1
static void gic_init(gic_state *s, int num_cpu)
#else
static void gic_init(gic_state *s)
#endif
{
    int i;

#if NCPU > 1
    s->num_cpu = num_cpu;
#endif
    /* External input lines start at internal interrupt 32, hence
       GIC_NIRQ - 32 GPIO inputs.  */
    qdev_init_gpio_in(&s->busdev.qdev, gic_set_irq, GIC_NIRQ - 32);
    for (i = 0; i < NUM_CPU(s); i++) {
        sysbus_init_irq(&s->busdev, &s->parent_irq[i]);
    }
    memory_region_init_io(&s->iomem, &gic_dist_ops, s, "gic_dist", 0x1000);
    gic_reset(s);
    register_savevm(NULL, "arm_gic", -1, 2, gic_save, gic_load, s);
}