505 |
505 |
8, 9, 10, 11, 12, 13, 14, 15
|
506 |
506 |
};
|
507 |
507 |
#else
|
508 |
|
static const int gpr_map[8] = {0, 1, 2, 3, 4, 5, 6, 7};
|
|
508 |
#define gpr_map gpr_map32
|
509 |
509 |
#endif
|
|
510 |
/* GDB-regnum -> env->regs[] index map for the 8 32-bit GPRs: an identity
 * mapping (0..7).  In non-64-bit builds, gpr_map aliases this table via the
 * "#define gpr_map gpr_map32" above; in 64-bit builds it is used for the
 * compatibility (32-bit CS) path.  NOTE(review): assumes GDB's i386 register
 * numbering matches the env->regs[] order — confirm against gdb's i386 tdesc. */
static const int gpr_map32[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
|
510 |
511 |
|
511 |
512 |
/* Number of "core" registers exposed to GDB:
 *   CPU_NB_REGS general-purpose regs + CPU_NB_REGS XMM regs + 25 others.
 * From the IDX_* handling below, the 25 covers the 8 x87 FP regs, eip,
 * eflags and the segment selectors; the remainder is presumably the FPU
 * control/status block — TODO confirm against the elided IDX_* definitions. */
#define NUM_CORE_REGS (CPU_NB_REGS * 2 + 25)
|
512 |
513 |
|
... | ... | |
520 |
521 |
static int cpu_gdb_read_register(CPUState *env, uint8_t *mem_buf, int n)
|
521 |
522 |
{
|
522 |
523 |
if (n < CPU_NB_REGS) {
|
523 |
|
GET_REGL(env->regs[gpr_map[n]]);
|
|
524 |
if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
|
|
525 |
GET_REG64(env->regs[gpr_map[n]]);
|
|
526 |
} else if (n < CPU_NB_REGS32) {
|
|
527 |
GET_REG32(env->regs[gpr_map32[n]]);
|
|
528 |
}
|
524 |
529 |
} else if (n >= IDX_FP_REGS && n < IDX_FP_REGS + 8) {
|
525 |
530 |
#ifdef USE_X86LDOUBLE
|
526 |
531 |
/* FIXME: byteswap float values - after fixing fpregs layout. */
|
... | ... | |
531 |
536 |
return 10;
|
532 |
537 |
} else if (n >= IDX_XMM_REGS && n < IDX_XMM_REGS + CPU_NB_REGS) {
|
533 |
538 |
n -= IDX_XMM_REGS;
|
534 |
|
stq_p(mem_buf, env->xmm_regs[n].XMM_Q(0));
|
535 |
|
stq_p(mem_buf + 8, env->xmm_regs[n].XMM_Q(1));
|
536 |
|
return 16;
|
|
539 |
if (n < CPU_NB_REGS32 ||
|
|
540 |
(TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK)) {
|
|
541 |
stq_p(mem_buf, env->xmm_regs[n].XMM_Q(0));
|
|
542 |
stq_p(mem_buf + 8, env->xmm_regs[n].XMM_Q(1));
|
|
543 |
return 16;
|
|
544 |
}
|
537 |
545 |
} else {
|
538 |
546 |
switch (n) {
|
539 |
|
case IDX_IP_REG: GET_REGL(env->eip);
|
|
547 |
case IDX_IP_REG:
|
|
548 |
if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
|
|
549 |
GET_REG64(env->eip);
|
|
550 |
} else {
|
|
551 |
GET_REG32(env->eip);
|
|
552 |
}
|
540 |
553 |
case IDX_FLAGS_REG: GET_REG32(env->eflags);
|
541 |
554 |
|
542 |
555 |
case IDX_SEG_REGS: GET_REG32(env->segs[R_CS].selector);
|
... | ... | |
592 |
605 |
uint32_t tmp;
|
593 |
606 |
|
594 |
607 |
if (n < CPU_NB_REGS) {
|
595 |
|
env->regs[gpr_map[n]] = ldtul_p(mem_buf);
|
596 |
|
return sizeof(target_ulong);
|
|
608 |
if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
|
|
609 |
env->regs[gpr_map[n]] = ldtul_p(mem_buf);
|
|
610 |
return sizeof(target_ulong);
|
|
611 |
} else if (n < CPU_NB_REGS32) {
|
|
612 |
n = gpr_map32[n];
|
|
613 |
env->regs[n] &= ~0xffffffffUL;
|
|
614 |
env->regs[n] |= (uint32_t)ldl_p(mem_buf);
|
|
615 |
return 4;
|
|
616 |
}
|
597 |
617 |
} else if (n >= IDX_FP_REGS && n < IDX_FP_REGS + 8) {
|
598 |
618 |
#ifdef USE_X86LDOUBLE
|
599 |
619 |
/* FIXME: byteswap float values - after fixing fpregs layout. */
|
... | ... | |
602 |
622 |
return 10;
|
603 |
623 |
} else if (n >= IDX_XMM_REGS && n < IDX_XMM_REGS + CPU_NB_REGS) {
|
604 |
624 |
n -= IDX_XMM_REGS;
|
605 |
|
env->xmm_regs[n].XMM_Q(0) = ldq_p(mem_buf);
|
606 |
|
env->xmm_regs[n].XMM_Q(1) = ldq_p(mem_buf + 8);
|
607 |
|
return 16;
|
|
625 |
if (n < CPU_NB_REGS32 ||
|
|
626 |
(TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK)) {
|
|
627 |
env->xmm_regs[n].XMM_Q(0) = ldq_p(mem_buf);
|
|
628 |
env->xmm_regs[n].XMM_Q(1) = ldq_p(mem_buf + 8);
|
|
629 |
return 16;
|
|
630 |
}
|
608 |
631 |
} else {
|
609 |
632 |
switch (n) {
|
610 |
633 |
case IDX_IP_REG:
|
611 |
|
env->eip = ldtul_p(mem_buf);
|
612 |
|
return sizeof(target_ulong);
|
|
634 |
if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
|
|
635 |
env->eip = ldq_p(mem_buf);
|
|
636 |
return 8;
|
|
637 |
} else {
|
|
638 |
env->eip &= ~0xffffffffUL;
|
|
639 |
env->eip |= (uint32_t)ldl_p(mem_buf);
|
|
640 |
return 4;
|
|
641 |
}
|
613 |
642 |
case IDX_FLAGS_REG:
|
614 |
643 |
env->eflags = ldl_p(mem_buf);
|
615 |
644 |
return 4;
|