/*
 * dyngen helpers
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
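/* Dummy definitions for the marker symbols referenced by the
   micro-operation templates: dyngen looks for relocations against
   __op_paramN (run-time operands), __op_gen_labelN (generated labels)
   and __op_jmpN (chained-jump targets) so the corresponding fields can
   be patched when an op is copied into the code buffer. */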
int __op_param1, __op_param2, __op_param3;
#if defined(__sparc__) || defined(__arm__)
void __op_gen_label1(){}
void __op_gen_label2(){}
void __op_gen_label3(){}
#else
int __op_gen_label1, __op_gen_label2, __op_gen_label3;
#endif
int __op_jmp0, __op_jmp1, __op_jmp2, __op_jmp3;
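/* flush_icache_range() must make code freshly written to
   [start, stop) visible to instruction fetch on the host.  On hosts
   with coherent instruction caches (i386, x86_64, s390) no explicit
   flush is needed; the other hosts use their architecture-specific
   flush sequence or system call. */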
#ifdef __i386__
static inline void flush_icache_range(unsigned long start, unsigned long stop)
{
}
#endif

#ifdef __x86_64__
static inline void flush_icache_range(unsigned long start, unsigned long stop)
{
}
#endif

#ifdef __s390__
static inline void flush_icache_range(unsigned long start, unsigned long stop)
{
}
#endif

#ifdef __ia64__
static inline void flush_icache_range(unsigned long start, unsigned long stop)
{
    while (start < stop) {
        asm volatile ("fc %0" :: "r"(start));
        start += 32;
    }
    asm volatile (";;sync.i;;srlz.i;;");
}
#endif
#ifdef __powerpc__

#define MIN_CACHE_LINE_SIZE 8 /* conservative value */

static void inline flush_icache_range(unsigned long start, unsigned long stop)
{
    unsigned long p;

    start &= ~(MIN_CACHE_LINE_SIZE - 1);
    stop = (stop + MIN_CACHE_LINE_SIZE - 1) & ~(MIN_CACHE_LINE_SIZE - 1);

    for (p = start; p < stop; p += MIN_CACHE_LINE_SIZE) {
        asm volatile ("dcbst 0,%0" : : "r"(p) : "memory");
    }
    asm volatile ("sync" : : : "memory");
    for (p = start; p < stop; p += MIN_CACHE_LINE_SIZE) {
        asm volatile ("icbi 0,%0" : : "r"(p) : "memory");
    }
    asm volatile ("sync" : : : "memory");
    asm volatile ("isync" : : : "memory");
}
#endif

#ifdef __alpha__
static inline void flush_icache_range(unsigned long start, unsigned long stop)
{
    asm ("imb");
}
#endif
#ifdef __sparc__

static void inline flush_icache_range(unsigned long start, unsigned long stop)
{
    unsigned long p;

    p = start & ~(8UL - 1UL);
    stop = (stop + (8UL - 1UL)) & ~(8UL - 1UL);

    for (; p < stop; p += 8)
        __asm__ __volatile__("flush\t%0" : : "r" (p));
}

#endif

#ifdef __arm__
static inline void flush_icache_range(unsigned long start, unsigned long stop)
{
    register unsigned long _beg __asm ("a1") = start;
    register unsigned long _end __asm ("a2") = stop;
    register unsigned long _flg __asm ("a3") = 0;
    __asm __volatile__ ("swi 0x9f0002" : : "r" (_beg), "r" (_end), "r" (_flg));
}
#endif

#ifdef __mc68000
#include <asm/cachectl.h>
static inline void flush_icache_range(unsigned long start, unsigned long stop)
{
    cacheflush(start, FLUSH_SCOPE_LINE, FLUSH_CACHE_BOTH, stop - start + 16);
}
#endif
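/* Alpha: patch immediate operands of copied instructions.
   immediate_ldah() rewrites the high 16 bits of an ldah (including the
   carry adjustment for the sign-extended low half) and forces the base
   register to $31, immediate_lda() rewrites a 16-bit displacement, and
   fix_bsr() rewrites the 21-bit branch displacement of a bsr. */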
#ifdef __alpha__

register int gp asm("$29");

static inline void immediate_ldah(void *p, int val) {
    uint32_t *dest = p;
    long high = ((val >> 16) + ((val >> 15) & 1)) & 0xffff;

    *dest &= ~0xffff;
    *dest |= high;
    *dest |= 31 << 16;
}
static inline void immediate_lda(void *dest, int val) {
    *(uint16_t *) dest = val;
}
void fix_bsr(void *p, int offset) {
    uint32_t *dest = p;
    *dest &= ~((1 << 21) - 1);
    *dest |= (offset >> 2) & ((1 << 21) - 1);
}

#endif /* __alpha__ */
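/* ARM: the generated ops load constants pc-relative from a constant
   pool.  arm_flush_ldr() copies the pool behind the emitted code
   (optionally emitting a branch that skips the data) and patches each
   recorded ldr/ldc/add so its offset points into the copied pool. */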
#ifdef __arm__

#define ARM_LDR_TABLE_SIZE 1024

typedef struct LDREntry {
    uint8_t *ptr;
    uint32_t *data_ptr;
    unsigned type:2;
} LDREntry;

static LDREntry arm_ldr_table[1024];
static uint32_t arm_data_table[ARM_LDR_TABLE_SIZE];

extern char exec_loop;

static inline void arm_reloc_pc24(uint32_t *ptr, uint32_t insn, int val)
{
    *ptr = (insn & ~0xffffff) | ((insn + ((val - (int)ptr) >> 2)) & 0xffffff);
}

static uint8_t *arm_flush_ldr(uint8_t *gen_code_ptr,
                              LDREntry *ldr_start, LDREntry *ldr_end,
                              uint32_t *data_start, uint32_t *data_end,
                              int gen_jmp)
{
    LDREntry *le;
    uint32_t *ptr;
    int offset, data_size, target;
    uint8_t *data_ptr;
    uint32_t insn;
    uint32_t mask;

    data_size = (data_end - data_start) << 2;

    if (gen_jmp) {
        /* generate branch to skip the data */
        if (data_size == 0)
            return gen_code_ptr;
        target = (long)gen_code_ptr + data_size + 4;
        arm_reloc_pc24((uint32_t *)gen_code_ptr, 0xeafffffe, target);
        gen_code_ptr += 4;
    }

    /* copy the data */
    data_ptr = gen_code_ptr;
    memcpy(gen_code_ptr, data_start, data_size);
    gen_code_ptr += data_size;

    /* patch the ldr to point to the data */
    for(le = ldr_start; le < ldr_end; le++) {
        ptr = (uint32_t *)le->ptr;
        offset = ((unsigned long)(le->data_ptr) - (unsigned long)data_start) +
                 (unsigned long)data_ptr -
                 (unsigned long)ptr - 8;
        if (offset < 0) {
            fprintf(stderr, "Negative constant pool offset\n");
            abort();
        }
        switch (le->type) {
        case 0: /* ldr */
            mask = ~0x00800fff;
            if (offset >= 4096) {
                fprintf(stderr, "Bad ldr offset\n");
                abort();
            }
            break;
        case 1: /* ldc */
            mask = ~0x008000ff;
            if (offset >= 1024) {
                fprintf(stderr, "Bad ldc offset\n");
                abort();
            }
            break;
        case 2: /* add */
            mask = ~0xfff;
            if (offset >= 1024) {
                fprintf(stderr, "Bad add offset\n");
                abort();
            }
            break;
        default:
            fprintf(stderr, "Bad pc relative fixup\n");
            abort();
        }
        insn = *ptr & mask;
        switch (le->type) {
        case 0: /* ldr */
            insn |= offset | 0x00800000;
            break;
        case 1: /* ldc */
            insn |= (offset >> 2) | 0x00800000;
            break;
        case 2: /* add */
            insn |= (offset >> 2) | 0xf00;
            break;
        }
        *ptr = insn;
    }
    return gen_code_ptr;
}

#endif /* __arm__ */
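/* IA64: helpers to patch immediates inside the three 41-bit
   instruction slots of a 16-byte bundle, plus the fixup machinery used
   to build a small PLT and GOT behind the generated code. */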
#ifdef __ia64

/* Patch instruction with "val" where "mask" has 1 bits. */
static inline void ia64_patch (uint64_t insn_addr, uint64_t mask, uint64_t val)
{
    uint64_t m0, m1, v0, v1, b0, b1, *b = (uint64_t *) (insn_addr & -16);
#   define insn_mask ((1UL << 41) - 1)
    unsigned long shift;

    b0 = b[0]; b1 = b[1];
    shift = 5 + 41 * (insn_addr % 16); /* 5 template, 3 x 41-bit insns */
    if (shift >= 64) {
        m1 = mask << (shift - 64);
        v1 = val << (shift - 64);
    } else {
        m0 = mask << shift; m1 = mask >> (64 - shift);
        v0 = val << shift; v1 = val >> (64 - shift);
        b[0] = (b0 & ~m0) | (v0 & m0);
    }
    b[1] = (b1 & ~m1) | (v1 & m1);
}

static inline void ia64_patch_imm60 (uint64_t insn_addr, uint64_t val)
{
    ia64_patch(insn_addr,
               0x011ffffe000UL,
               (  ((val & 0x0800000000000000UL) >> 23) /* bit 59 -> 36 */
                | ((val & 0x00000000000fffffUL) << 13) /* bit  0 -> 13 */));
    ia64_patch(insn_addr - 1, 0x1fffffffffcUL, val >> 18);
}

static inline void ia64_imm64 (void *insn, uint64_t val)
{
    /* Ignore the slot number of the relocation; GCC and Intel
       toolchains differed for some time on whether IMM64 relocs are
       against slot 1 (Intel) or slot 2 (GCC).  */
    uint64_t insn_addr = (uint64_t) insn & ~3UL;

    ia64_patch(insn_addr + 2,
               0x01fffefe000UL,
               (  ((val & 0x8000000000000000UL) >> 27) /* bit 63 -> 36 */
                | ((val & 0x0000000000200000UL) <<  0) /* bit 21 -> 21 */
                | ((val & 0x00000000001f0000UL) <<  6) /* bit 16 -> 22 */
                | ((val & 0x000000000000ff80UL) << 20) /* bit  7 -> 27 */
                | ((val & 0x000000000000007fUL) << 13) /* bit  0 -> 13 */)
               );
    ia64_patch(insn_addr + 1, 0x1ffffffffffUL, val >> 22);
}

static inline void ia64_imm60b (void *insn, uint64_t val)
{
    /* Ignore the slot number of the relocation; GCC and Intel
       toolchains differed for some time on whether IMM64 relocs are
       against slot 1 (Intel) or slot 2 (GCC).  */
    uint64_t insn_addr = (uint64_t) insn & ~3UL;

    if (val + ((uint64_t) 1 << 59) >= (1UL << 60))
        fprintf(stderr, "%s: value %ld out of IMM60 range\n",
                __FUNCTION__, (int64_t) val);
    ia64_patch_imm60(insn_addr + 2, val);
}

static inline void ia64_imm22 (void *insn, uint64_t val)
{
    if (val + (1 << 21) >= (1 << 22))
        fprintf(stderr, "%s: value %li out of IMM22 range\n",
                __FUNCTION__, (int64_t)val);
    ia64_patch((uint64_t) insn, 0x01fffcfe000UL,
               (  ((val & 0x200000UL) << 15) /* bit 21 -> 36 */
                | ((val & 0x1f0000UL) <<  6) /* bit 16 -> 22 */
                | ((val & 0x00ff80UL) << 20) /* bit  7 -> 27 */
                | ((val & 0x00007fUL) << 13) /* bit  0 -> 13 */));
}

/* Like ia64_imm22(), but also clear bits 20-21.  For addl, this has
   the effect of turning "addl rX=imm22,rY" into "addl
   rX=imm22,r0".  */
static inline void ia64_imm22_r0 (void *insn, uint64_t val)
{
    if (val + (1 << 21) >= (1 << 22))
        fprintf(stderr, "%s: value %li out of IMM22 range\n",
                __FUNCTION__, (int64_t)val);
    ia64_patch((uint64_t) insn, 0x01fffcfe000UL | (0x3UL << 20),
               (  ((val & 0x200000UL) << 15) /* bit 21 -> 36 */
                | ((val & 0x1f0000UL) <<  6) /* bit 16 -> 22 */
                | ((val & 0x00ff80UL) << 20) /* bit  7 -> 27 */
                | ((val & 0x00007fUL) << 13) /* bit  0 -> 13 */));
}

static inline void ia64_imm21b (void *insn, uint64_t val)
{
    if (val + (1 << 20) >= (1 << 21))
        fprintf(stderr, "%s: value %li out of IMM21b range\n",
                __FUNCTION__, (int64_t)val);
    ia64_patch((uint64_t) insn, 0x11ffffe000UL,
               (  ((val & 0x100000UL) << 16) /* bit 20 -> 36 */
                | ((val & 0x0fffffUL) << 13) /* bit  0 -> 13 */));
}

static inline void ia64_nop_b (void *insn)
{
    ia64_patch((uint64_t) insn, (1UL << 41) - 1, 2UL << 37);
}

static inline void ia64_ldxmov(void *insn, uint64_t val)
{
    if (val + (1 << 21) < (1 << 22))
        ia64_patch((uint64_t) insn, 0x1fff80fe000UL, 8UL << 37);
}

static inline int ia64_patch_ltoff(void *insn, uint64_t val,
                                   int relaxable)
{
    if (relaxable && (val + (1 << 21) < (1 << 22))) {
        ia64_imm22_r0(insn, val);
        return 0;
    }
    return 1;
}
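/* Call and ltoff sites that cannot be resolved on the spot are queued
   on singly-linked fixup lists (the entries live on the caller's stack
   via alloca) and are resolved later by ia64_apply_fixes(). */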
struct ia64_fixup {
    struct ia64_fixup *next;
    void *addr;                 /* address that needs to be patched */
    long value;
};

#define IA64_PLT(insn, plt_index)                       \
do {                                                    \
    struct ia64_fixup *fixup = alloca(sizeof(*fixup));  \
    fixup->next = plt_fixes;                            \
    plt_fixes = fixup;                                  \
    fixup->addr = (insn);                               \
    fixup->value = (plt_index);                         \
    plt_offset[(plt_index)] = 1;                        \
} while (0)

#define IA64_LTOFF(insn, val, relaxable)                        \
do {                                                            \
    if (ia64_patch_ltoff(insn, val, relaxable)) {               \
        struct ia64_fixup *fixup = alloca(sizeof(*fixup));      \
        fixup->next = ltoff_fixes;                              \
        ltoff_fixes = fixup;                                    \
        fixup->addr = (insn);                                   \
        fixup->value = (val);                                   \
    }                                                           \
} while (0)
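/* Resolve the queued fixups: emit one plt_bundle per referenced PLT
   index (loading the target's gp and branching to its entry point from
   its function descriptor), patch each recorded call site with a
   21-bit branch to its bundle, then build a deduplicated GOT from the
   ltoff values and patch each site with the gp-relative offset of its
   GOT slot. */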
static inline void ia64_apply_fixes (uint8_t **gen_code_pp,
                                     struct ia64_fixup *ltoff_fixes,
                                     uint64_t gp,
                                     struct ia64_fixup *plt_fixes,
                                     int num_plts,
                                     unsigned long *plt_target,
                                     unsigned int *plt_offset)
{
    static const uint8_t plt_bundle[] = {
        0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, /* nop 0; movl r1=GP */
        0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x60,

        0x05, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, /* nop 0; brl IP */
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0
    };
    uint8_t *gen_code_ptr = *gen_code_pp, *plt_start, *got_start, *vp;
    struct ia64_fixup *fixup;
    unsigned int offset = 0;
    struct fdesc {
        long ip;
        long gp;
    } *fdesc;
    int i;

    if (plt_fixes) {
        plt_start = gen_code_ptr;

        for (i = 0; i < num_plts; ++i) {
            if (plt_offset[i]) {
                plt_offset[i] = offset;
                offset += sizeof(plt_bundle);

                fdesc = (struct fdesc *) plt_target[i];
                memcpy(gen_code_ptr, plt_bundle, sizeof(plt_bundle));
                ia64_imm64 (gen_code_ptr + 0x02, fdesc->gp);
                ia64_imm60b(gen_code_ptr + 0x12,
                            (fdesc->ip - (long) (gen_code_ptr + 0x10)) >> 4);
                gen_code_ptr += sizeof(plt_bundle);
            }
        }

        for (fixup = plt_fixes; fixup; fixup = fixup->next)
            ia64_imm21b(fixup->addr,
                        ((long) plt_start + plt_offset[fixup->value]
                         - ((long) fixup->addr & ~0xf)) >> 4);
    }

    got_start = gen_code_ptr;

    /* First, create the GOT: */
    for (fixup = ltoff_fixes; fixup; fixup = fixup->next) {
        /* first check if we already have this value in the GOT: */
        for (vp = got_start; vp < gen_code_ptr; ++vp)
            if (*(uint64_t *) vp == fixup->value)
                break;
        if (vp == gen_code_ptr) {
            /* Nope, we need to put the value in the GOT: */
            *(uint64_t *) vp = fixup->value;
            gen_code_ptr += 8;
        }
        ia64_imm22(fixup->addr, (long) vp - gp);
    }
    /* Keep code ptr aligned.  */
    if ((long) gen_code_ptr & 15)
        gen_code_ptr += 8;
    *gen_code_pp = gen_code_ptr;
}

#endif