Revision 867b3201 include/exec/softmmu_template.h

--- a/include/exec/softmmu_template.h
+++ b/include/exec/softmmu_template.h
@@ -70,6 +70,48 @@
 #define ADDR_READ addr_read
 #endif
 
+#if DATA_SIZE == 8
+# define BSWAP(X)  bswap64(X)
+#elif DATA_SIZE == 4
+# define BSWAP(X)  bswap32(X)
+#elif DATA_SIZE == 2
+# define BSWAP(X)  bswap16(X)
+#else
+# define BSWAP(X)  (X)
+#endif
+
+#ifdef TARGET_WORDS_BIGENDIAN
+# define TGT_BE(X)  (X)
+# define TGT_LE(X)  BSWAP(X)
+#else
+# define TGT_BE(X)  BSWAP(X)
+# define TGT_LE(X)  (X)
+#endif
+
+#if DATA_SIZE == 1
+# define helper_le_ld_name  glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)
+# define helper_be_ld_name  helper_le_ld_name
+# define helper_le_lds_name glue(glue(helper_ret_ld, SSUFFIX), MMUSUFFIX)
+# define helper_be_lds_name helper_le_lds_name
+# define helper_le_st_name  glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)
+# define helper_be_st_name  helper_le_st_name
+#else
+# define helper_le_ld_name  glue(glue(helper_le_ld, USUFFIX), MMUSUFFIX)
+# define helper_be_ld_name  glue(glue(helper_be_ld, USUFFIX), MMUSUFFIX)
+# define helper_le_lds_name glue(glue(helper_le_ld, SSUFFIX), MMUSUFFIX)
+# define helper_be_lds_name glue(glue(helper_be_ld, SSUFFIX), MMUSUFFIX)
+# define helper_le_st_name  glue(glue(helper_le_st, SUFFIX), MMUSUFFIX)
+# define helper_be_st_name  glue(glue(helper_be_st, SUFFIX), MMUSUFFIX)
+#endif
+
+#ifdef TARGET_WORDS_BIGENDIAN
+# define helper_te_ld_name  helper_be_ld_name
+# define helper_te_st_name  helper_be_st_name
+#else
+# define helper_te_ld_name  helper_le_ld_name
+# define helper_te_st_name  helper_le_st_name
+#endif
+
 static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
                                               hwaddr physaddr,
                                               target_ulong addr,
......
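The new BSWAP/TGT_LE/TGT_BE macros above decide at preprocessing time whether a value must be byte-swapped to match the flavour of the access being made. A minimal sketch of the idea for a 4-byte access (illustrative only, not part of the patch; demo_bswap32 stands in for QEMU's bswap32 helper):

#include <stdint.h>

/* Stand-in for QEMU's bswap32(); illustrative only. */
static inline uint32_t demo_bswap32(uint32_t x)
{
    return ((x & 0x000000ffu) << 24) | ((x & 0x0000ff00u) << 8) |
           ((x & 0x00ff0000u) >> 8)  | ((x & 0xff000000u) >> 24);
}

/* On a big-endian target TGT_BE(X) is a no-op and TGT_LE(X) swaps;
   on a little-endian target it is the other way around.  So a value
   of 0x11223344 read by io_read in target (big-endian) order becomes
   demo_bswap32(0x11223344) == 0x44332211 when the request was for a
   little-endian load.  */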
@@ -89,18 +131,16 @@
     return val;
 }
 
-/* handle all cases except unaligned access which span two pages */
 #ifdef SOFTMMU_CODE_ACCESS
-static
+static __attribute__((unused))
 #endif
-WORD_TYPE
-glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)(CPUArchState *env,
-                                              target_ulong addr, int mmu_idx,
-                                              uintptr_t retaddr)
+WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
+                            uintptr_t retaddr)
 {
     int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
     target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
     uintptr_t haddr;
+    DATA_TYPE res;
 
     /* Adjust the given return address.  */
     retaddr -= GETPC_ADJ;
......
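For reference, the TLB lookup at the top of the helper hashes the virtual address into a direct-mapped, per-mmu-index table. A small worked sketch with illustrative constants (4 KiB pages, i.e. a page-bits value of 12, and a 256-entry table; the real TARGET_PAGE_BITS and CPU_TLB_SIZE are target and configuration dependent):

#include <stdint.h>

#define DEMO_TARGET_PAGE_BITS 12   /* assumed 4 KiB pages */
#define DEMO_CPU_TLB_SIZE     256  /* assumed table size  */

/* Same hash as the helper: page number modulo the table size. */
static inline int demo_tlb_index(uint64_t addr)
{
    return (addr >> DEMO_TARGET_PAGE_BITS) & (DEMO_CPU_TLB_SIZE - 1);
}

/* e.g. demo_tlb_index(0x12345678) == 0x45: entry 0x45 either already
   describes page 0x12345, or the helper falls back to tlb_fill().    */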
@@ -124,7 +164,12 @@
             goto do_unaligned_access;
         }
         ioaddr = env->iotlb[mmu_idx][index];
-        return glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr);
+
+        /* ??? Note that the io helpers always read data in the target
+           byte ordering.  We should push the LE/BE request down into io.  */
+        res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr);
+        res = TGT_LE(res);
+        return res;
     }
 
     /* Handle slow unaligned access (it spans two pages or IO).  */
......
@@ -132,7 +177,7 @@
         && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                     >= TARGET_PAGE_SIZE)) {
         target_ulong addr1, addr2;
-        DATA_TYPE res1, res2, res;
+        DATA_TYPE res1, res2;
         unsigned shift;
     do_unaligned_access:
 #ifdef ALIGNED_ONLY
......
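A note on retaddr handling around the recursion that follows: the helper subtracts GETPC_ADJ once on entry, so when it calls itself for the two halves of a straddling access it passes retaddr + GETPC_ADJ, keeping the net adjustment at exactly one. A minimal sketch of that invariant (the GETPC_ADJ value is host-specific; 2 is used here purely for illustration):

#include <stdint.h>

#define DEMO_GETPC_ADJ 2   /* host-dependent; illustrative value only */

static uintptr_t demo_depth2(uintptr_t retaddr)
{
    retaddr -= DEMO_GETPC_ADJ;   /* bias applied once on entry */
    return retaddr;
}

static uintptr_t demo_depth1(uintptr_t retaddr)
{
    retaddr -= DEMO_GETPC_ADJ;   /* bias applied once on entry */
    /* Undo the bias when recursing, as the helpers below do, so the
       callee sees the same unadjusted value this function was given. */
    return demo_depth2(retaddr + DEMO_GETPC_ADJ);
}

/* demo_depth1(x) == x - DEMO_GETPC_ADJ for any x: the adjustment is
   applied exactly once, however deep the recursion goes.             */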
@@ -142,16 +187,94 @@
         addr2 = addr1 + DATA_SIZE;
         /* Note the adjustment at the beginning of the function.
            Undo that for the recursion.  */
-        res1 = glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)
-            (env, addr1, mmu_idx, retaddr + GETPC_ADJ);
-        res2 = glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)
-            (env, addr2, mmu_idx, retaddr + GETPC_ADJ);
+        res1 = helper_le_ld_name(env, addr1, mmu_idx, retaddr + GETPC_ADJ);
+        res2 = helper_le_ld_name(env, addr2, mmu_idx, retaddr + GETPC_ADJ);
         shift = (addr & (DATA_SIZE - 1)) * 8;
-#ifdef TARGET_WORDS_BIGENDIAN
-        res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
-#else
+
+        /* Little-endian combine.  */
         res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
+        return res;
+    }
+
+    /* Handle aligned access or unaligned access in the same page.  */
+#ifdef ALIGNED_ONLY
+    if ((addr & (DATA_SIZE - 1)) != 0) {
+        do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
+    }
+#endif
+
+    haddr = addr + env->tlb_table[mmu_idx][index].addend;
+#if DATA_SIZE == 1
+    res = glue(glue(ld, LSUFFIX), _p)((uint8_t *)haddr);
+#else
+    res = glue(glue(ld, LSUFFIX), _le_p)((uint8_t *)haddr);
+#endif
+    return res;
+}
+
+#if DATA_SIZE > 1
+#ifdef SOFTMMU_CODE_ACCESS
+static __attribute__((unused))
+#endif
+WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
+                            uintptr_t retaddr)
+{
+    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
+    uintptr_t haddr;
+    DATA_TYPE res;
+
+    /* Adjust the given return address.  */
+    retaddr -= GETPC_ADJ;
+
+    /* If the TLB entry is for a different page, reload and try again.  */
+    if ((addr & TARGET_PAGE_MASK)
+         != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+#ifdef ALIGNED_ONLY
+        if ((addr & (DATA_SIZE - 1)) != 0) {
+            do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
+        }
+#endif
+        tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
+        tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
+    }
+
+    /* Handle an IO access.  */
+    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
+        hwaddr ioaddr;
+        if ((addr & (DATA_SIZE - 1)) != 0) {
+            goto do_unaligned_access;
+        }
+        ioaddr = env->iotlb[mmu_idx][index];
+
+        /* ??? Note that the io helpers always read data in the target
+           byte ordering.  We should push the LE/BE request down into io.  */
+        res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr);
+        res = TGT_BE(res);
+        return res;
+    }
+
+    /* Handle slow unaligned access (it spans two pages or IO).  */
+    if (DATA_SIZE > 1
+        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
+                    >= TARGET_PAGE_SIZE)) {
+        target_ulong addr1, addr2;
+        DATA_TYPE res1, res2;
+        unsigned shift;
+    do_unaligned_access:
+#ifdef ALIGNED_ONLY
+        do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
 #endif
+        addr1 = addr & ~(DATA_SIZE - 1);
+        addr2 = addr1 + DATA_SIZE;
+        /* Note the adjustment at the beginning of the function.
+           Undo that for the recursion.  */
+        res1 = helper_be_ld_name(env, addr1, mmu_idx, retaddr + GETPC_ADJ);
+        res2 = helper_be_ld_name(env, addr2, mmu_idx, retaddr + GETPC_ADJ);
+        shift = (addr & (DATA_SIZE - 1)) * 8;
+
+        /* Big-endian combine.  */
+        res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
         return res;
     }
 
......
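The two combine expressions above reassemble an access that straddles a page boundary from two aligned recursive loads. A worked sketch for DATA_SIZE == 4 (illustrative only; memory is assumed to hold the bytes 00 11 22 33 44 55 66 77 starting at addr1, and the guest loads 4 bytes from addr1 + 1, so shift == 8; the helpers only reach this path for genuinely unaligned addresses, so shift is nonzero here):

#include <stdint.h>

static uint32_t demo_le_combine(uint32_t res1, uint32_t res2, unsigned shift)
{
    /* res1 = 0x33221100 (LE load at addr1), res2 = 0x77665544 (addr1+4):
       (res1 >> 8) | (res2 << 24) == 0x44332211, the LE view of 11 22 33 44. */
    return (res1 >> shift) | (res2 << (32 - shift));
}

static uint32_t demo_be_combine(uint32_t res1, uint32_t res2, unsigned shift)
{
    /* res1 = 0x00112233 (BE load at addr1), res2 = 0x44556677 (addr1+4):
       (res1 << 8) | (res2 >> 24) == 0x11223344, the BE view of 11 22 33 44. */
    return (res1 << shift) | (res2 >> (32 - shift));
}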
@@ -163,16 +286,16 @@
 #endif
 
     haddr = addr + env->tlb_table[mmu_idx][index].addend;
-    /* Note that ldl_raw is defined with type "int".  */
-    return (DATA_TYPE) glue(glue(ld, LSUFFIX), _raw)((uint8_t *)haddr);
+    res = glue(glue(ld, LSUFFIX), _be_p)((uint8_t *)haddr);
+    return res;
 }
+#endif /* DATA_SIZE > 1 */
 
 DATA_TYPE
 glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
                                          int mmu_idx)
 {
-    return glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)(env, addr, mmu_idx,
-                                                         GETRA());
+    return helper_te_ld_name (env, addr, mmu_idx, GETRA());
 }
 
 #ifndef SOFTMMU_CODE_ACCESS
......
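The unchanged legacy entry point glue(glue(helper_ld, SUFFIX), MMUSUFFIX) above now just forwards to whichever flavour matches the target's default byte order. Purely as an illustration (assuming the usual per-size suffixes, SUFFIX == l, USUFFIX == ul and MMUSUFFIX == _mmu for the 4-byte data instantiation), the names resolve roughly like this:

/* Illustrative expansion only, not part of the patch:
 *
 *   helper_le_ld_name  ->  helper_le_ldul_mmu
 *   helper_be_ld_name  ->  helper_be_ldul_mmu
 *   helper_te_ld_name  ->  helper_be_ldul_mmu  (TARGET_WORDS_BIGENDIAN)
 *                          helper_le_ldul_mmu  (otherwise)
 *
 * so the legacy helper_ldl_mmu() keeps its previous behaviour while the
 * explicitly little- and big-endian variants become directly callable.
 */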
@@ -180,14 +303,19 @@
 /* Provide signed versions of the load routines as well.  We can of course
    avoid this for 64-bit data, or for 32-bit data on 32-bit host.  */
 #if DATA_SIZE * 8 < TCG_TARGET_REG_BITS
-WORD_TYPE
-glue(glue(helper_ret_ld, SSUFFIX), MMUSUFFIX)(CPUArchState *env,
-                                              target_ulong addr, int mmu_idx,
-                                              uintptr_t retaddr)
+WORD_TYPE helper_le_lds_name(CPUArchState *env, target_ulong addr,
+                             int mmu_idx, uintptr_t retaddr)
+{
+    return (SDATA_TYPE)helper_le_ld_name(env, addr, mmu_idx, retaddr);
+}
+
+# if DATA_SIZE > 1
+WORD_TYPE helper_be_lds_name(CPUArchState *env, target_ulong addr,
+                             int mmu_idx, uintptr_t retaddr)
 {
-    return (SDATA_TYPE) glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)
-        (env, addr, mmu_idx, retaddr);
+    return (SDATA_TYPE)helper_be_ld_name(env, addr, mmu_idx, retaddr);
 }
+# endif
 #endif
 
 static inline void glue(io_write, SUFFIX)(CPUArchState *env,
......
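The signed wrappers above rely on a plain C conversion for the sign extension: the unsigned result is cast to the signed SDATA_TYPE and the implicit widening back to WORD_TYPE extends the sign bit. A self-contained sketch of the same mechanism for a 16-bit load returned in a 64-bit register type (types chosen for illustration only):

#include <stdint.h>

/* What (SDATA_TYPE)helper_le_ld_name(...) amounts to for DATA_SIZE == 2:
   reinterpret the 16-bit result as signed, then let the widening to the
   register-sized return type sign-extend it.                            */
static uint64_t demo_sign_extend_16(uint16_t loaded)
{
    return (uint64_t)(int16_t)loaded;
}

/* demo_sign_extend_16(0x8001) == 0xffffffffffff8001
   demo_sign_extend_16(0x7fff) == 0x0000000000007fff */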
@@ -208,10 +336,8 @@
     io_mem_write(mr, physaddr, val, 1 << SHIFT);
 }
 
-void
-glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
-                                             target_ulong addr, DATA_TYPE val,
-                                             int mmu_idx, uintptr_t retaddr)
+void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
+                       int mmu_idx, uintptr_t retaddr)
 {
     int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
     target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
......
@@ -239,6 +365,10 @@
             goto do_unaligned_access;
         }
         ioaddr = env->iotlb[mmu_idx][index];
+
+        /* ??? Note that the io helpers always read data in the target
+           byte ordering.  We should push the LE/BE request down into io.  */
+        val = TGT_LE(val);
         glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr);
         return;
     }
......
@@ -256,11 +386,84 @@
         /* Note: relies on the fact that tlb_fill() does not remove the
          * previous page from the TLB cache.  */
         for (i = DATA_SIZE - 1; i >= 0; i--) {
-#ifdef TARGET_WORDS_BIGENDIAN
-            uint8_t val8 = val >> (((DATA_SIZE - 1) * 8) - (i * 8));
-#else
+            /* Little-endian extract.  */
             uint8_t val8 = val >> (i * 8);
+            /* Note the adjustment at the beginning of the function.
+               Undo that for the recursion.  */
+            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
+                                            mmu_idx, retaddr + GETPC_ADJ);
+        }
+        return;
+    }
+
+    /* Handle aligned access or unaligned access in the same page.  */
+#ifdef ALIGNED_ONLY
+    if ((addr & (DATA_SIZE - 1)) != 0) {
+        do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
+    }
+#endif
+
+    haddr = addr + env->tlb_table[mmu_idx][index].addend;
+#if DATA_SIZE == 1
+    glue(glue(st, SUFFIX), _p)((uint8_t *)haddr, val);
+#else
+    glue(glue(st, SUFFIX), _le_p)((uint8_t *)haddr, val);
 #endif
+}
+
+#if DATA_SIZE > 1
+void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
+                       int mmu_idx, uintptr_t retaddr)
+{
+    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
+    uintptr_t haddr;
+
+    /* Adjust the given return address.  */
+    retaddr -= GETPC_ADJ;
+
+    /* If the TLB entry is for a different page, reload and try again.  */
+    if ((addr & TARGET_PAGE_MASK)
+        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+#ifdef ALIGNED_ONLY
+        if ((addr & (DATA_SIZE - 1)) != 0) {
+            do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
+        }
+#endif
+        tlb_fill(env, addr, 1, mmu_idx, retaddr);
+        tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
+    }
+
+    /* Handle an IO access.  */
+    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
+        hwaddr ioaddr;
+        if ((addr & (DATA_SIZE - 1)) != 0) {
+            goto do_unaligned_access;
+        }
+        ioaddr = env->iotlb[mmu_idx][index];
+
+        /* ??? Note that the io helpers always read data in the target
+           byte ordering.  We should push the LE/BE request down into io.  */
+        val = TGT_BE(val);
+        glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr);
+        return;
+    }
+
+    /* Handle slow unaligned access (it spans two pages or IO).  */
+    if (DATA_SIZE > 1
+        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
+                     >= TARGET_PAGE_SIZE)) {
+        int i;
+    do_unaligned_access:
+#ifdef ALIGNED_ONLY
+        do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
+#endif
+        /* XXX: not efficient, but simple */
+        /* Note: relies on the fact that tlb_fill() does not remove the
+         * previous page from the TLB cache.  */
+        for (i = DATA_SIZE - 1; i >= 0; i--) {
+            /* Big-endian extract.  */
+            uint8_t val8 = val >> (((DATA_SIZE - 1) * 8) - (i * 8));
             /* Note the adjustment at the beginning of the function.
                Undo that for the recursion.  */
             glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
......
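The two extract expressions above serialize val into individual bytes for the slow byte-at-a-time store path. A worked sketch for DATA_SIZE == 4 and val == 0x11223344 (illustrative only; dst stands in for the target pages being written):

#include <stdint.h>

/* Little-endian extract: byte i of the store is val >> (i * 8),
   so dst[0..3] receive 44 33 22 11.                              */
static void demo_store_le32(uint8_t *dst, uint32_t val)
{
    for (int i = 3; i >= 0; i--) {
        dst[i] = (uint8_t)(val >> (i * 8));
    }
}

/* Big-endian extract: byte i is val >> (((4 - 1) * 8) - (i * 8)),
   so dst[0..3] receive 11 22 33 44.                               */
static void demo_store_be32(uint8_t *dst, uint32_t val)
{
    for (int i = 3; i >= 0; i--) {
        dst[i] = (uint8_t)(val >> (((4 - 1) * 8) - (i * 8)));
    }
}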
@@ -277,15 +480,15 @@
 #endif
 
     haddr = addr + env->tlb_table[mmu_idx][index].addend;
-    glue(glue(st, SUFFIX), _raw)((uint8_t *)haddr, val);
+    glue(glue(st, SUFFIX), _be_p)((uint8_t *)haddr, val);
 }
+#endif /* DATA_SIZE > 1 */
 
 void
 glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
                                          DATA_TYPE val, int mmu_idx)
 {
-    glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)(env, addr, val, mmu_idx,
-                                                 GETRA());
+    helper_te_st_name(env, addr, val, mmu_idx, GETRA());
 }
 
 #endif /* !defined(SOFTMMU_CODE_ACCESS) */
......
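As with the loads, the store entry point glue(glue(helper_st, SUFFIX), MMUSUFFIX) above is unchanged for its callers and simply forwards to the target-endian flavour. A hedged sketch of what the 4-byte instantiation roughly amounts to (assuming SUFFIX == l and MMUSUFFIX == _mmu; the concrete names are illustrative of the usual suffix choices, not part of the patch):

/* Illustrative expansion only. */
void helper_stl_mmu(CPUArchState *env, target_ulong addr,
                    uint32_t val, int mmu_idx)
{
#ifdef TARGET_WORDS_BIGENDIAN
    helper_be_stl_mmu(env, addr, val, mmu_idx, GETRA());
#else
    helper_le_stl_mmu(env, addr, val, mmu_idx, GETRA());
#endif
}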
@@ -301,3 +504,16 @@
 #undef SDATA_TYPE
 #undef USUFFIX
 #undef SSUFFIX
+#undef BSWAP
+#undef TGT_BE
+#undef TGT_LE
+#undef CPU_BE
+#undef CPU_LE
+#undef helper_le_ld_name
+#undef helper_be_ld_name
+#undef helper_le_lds_name
+#undef helper_be_lds_name
+#undef helper_le_st_name
+#undef helper_be_st_name
+#undef helper_te_ld_name
+#undef helper_te_st_name
