X-Git-Url: https://git.distorted.org.uk/~mdw/catacomb/blobdiff_plain/89717a56084f7cac56330c8527fbaff99b15709b..HEAD:/base/regdump-x86ish.S

diff --git a/base/regdump-x86ish.S b/base/regdump-x86ish.S
index e4dd8e80..67a4ae0e 100644
--- a/base/regdump-x86ish.S
+++ b/base/regdump-x86ish.S
@@ -56,48 +56,48 @@ FUNC(regdump_gpsave)
         cld

         // Save r/ebp and establish it pointing to the save area.
-        mov     [R_sp(r) + WORDSZ + REGIX_BP*WORDSZ], R_bp(r)
-        lea     R_bp(r), [R_sp(r) + WORDSZ]
+        mov     [SP + WORDSZ + REGIX_BP*WORDSZ], BP
+        lea     BP, [SP + WORDSZ]

         // Save the other easy general-purpose registers.
 #if !CPUFAM_X86
-        mov     [R_bp(r) + REGIX_BX*WORDSZ], R_b(r)
+        mov     [BP + REGIX_BX*WORDSZ], BX
 #endif
-        mov     [R_bp(r) + REGIX_CX*WORDSZ], R_c(r)
-        mov     [R_bp(r) + REGIX_DX*WORDSZ], R_d(r)
-        mov     [R_bp(r) + REGIX_SI*WORDSZ], R_si(r)
-        mov     [R_bp(r) + REGIX_DI*WORDSZ], R_di(r)
+        mov     [BP + REGIX_CX*WORDSZ], CX
+        mov     [BP + REGIX_DX*WORDSZ], DX
+        mov     [BP + REGIX_SI*WORDSZ], SI
+        mov     [BP + REGIX_DI*WORDSZ], DI
 #if CPUFAM_AMD64
-        mov     [R_bp(r) + REGIX_R8*WORDSZ], R_r8(r)
-        mov     [R_bp(r) + REGIX_R9*WORDSZ], R_r9(r)
-        mov     [R_bp(r) + REGIX_R10*WORDSZ], R_r10(r)
-        mov     [R_bp(r) + REGIX_R11*WORDSZ], R_r11(r)
-        mov     [R_bp(r) + REGIX_R12*WORDSZ], R_r12(r)
-        mov     [R_bp(r) + REGIX_R13*WORDSZ], R_r13(r)
-        mov     [R_bp(r) + REGIX_R14*WORDSZ], R_r14(r)
-        mov     [R_bp(r) + REGIX_R15*WORDSZ], R_r15(r)
+        mov     [BP + REGIX_R8*WORDSZ], r8
+        mov     [BP + REGIX_R9*WORDSZ], r9
+        mov     [BP + REGIX_R10*WORDSZ], r10
+        mov     [BP + REGIX_R11*WORDSZ], r11
+        mov     [BP + REGIX_R12*WORDSZ], r12
+        mov     [BP + REGIX_R13*WORDSZ], r13
+        mov     [BP + REGIX_R14*WORDSZ], r14
+        mov     [BP + REGIX_R15*WORDSZ], r15
 #endif

         // Determine the previous stack pointer and save it.
 #if CPUFAM_AMD64 && ABI_SYSV
-        lea     R_a(r), [R_bp(r) + 128 + REGDUMP_GPSIZE]
+        lea     AX, [BP + 128 + REGDUMP_GPSIZE]
 #else
-        lea     R_a(r), [R_bp(r) + REGDUMP_GPSIZE]
+        lea     AX, [BP + REGDUMP_GPSIZE]
 #endif
-        mov     [R_bp(r) + REGIX_SP*WORDSZ], R_a(r)
+        mov     [BP + REGIX_SP*WORDSZ], AX

         // Collect the return address and save it as r/eip.
-        mov     R_a(r), [R_sp(r)]
-        mov     [R_bp(r) + REGIX_IP*WORDSZ], R_a(r)
+        mov     AX, [SP]
+        mov     [BP + REGIX_IP*WORDSZ], AX

         // Save the segment registers.
-        lea     R_a(r), [R_bp(r) + REGIX_GPLIM*WORDSZ]
-        mov     [R_a(r) + 2*REGIX_CS], cs
-        mov     [R_a(r) + 2*REGIX_DS], ds
-        mov     [R_a(r) + 2*REGIX_SS], ss
-        mov     [R_a(r) + 2*REGIX_ES], es
-        mov     [R_a(r) + 2*REGIX_FS], fs
-        mov     [R_a(r) + 2*REGIX_GS], gs
+        lea     AX, [BP + REGIX_GPLIM*WORDSZ]
+        mov     [AX + 2*REGIX_CS], cs
+        mov     [AX + 2*REGIX_DS], ds
+        mov     [AX + 2*REGIX_SS], ss
+        mov     [AX + 2*REGIX_ES], es
+        mov     [AX + 2*REGIX_FS], fs
+        mov     [AX + 2*REGIX_GS], gs

         // Determine the extended save area size.  Preserve ebx on 32-bit x86
         // here, because the caller needs it for PLT-indirect calls.
@@ -135,23 +135,23 @@ FUNC(regdump_gprstr)
         // We assume nobody actually fiddled with the segment registers.  So
         // just the actual integer registers to do.

-        mov     R_a(r), [R_bp(r) + REGIX_AX*WORDSZ]
-        mov     R_b(r), [R_bp(r) + REGIX_BX*WORDSZ]
-        mov     R_c(r), [R_bp(r) + REGIX_CX*WORDSZ]
-        mov     R_d(r), [R_bp(r) + REGIX_DX*WORDSZ]
-        mov     R_si(r), [R_bp(r) + REGIX_SI*WORDSZ]
-        mov     R_di(r), [R_bp(r) + REGIX_DI*WORDSZ]
+        mov     AX, [BP + REGIX_AX*WORDSZ]
+        mov     BX, [BP + REGIX_BX*WORDSZ]
+        mov     CX, [BP + REGIX_CX*WORDSZ]
+        mov     DX, [BP + REGIX_DX*WORDSZ]
+        mov     SI, [BP + REGIX_SI*WORDSZ]
+        mov     DI, [BP + REGIX_DI*WORDSZ]
 #if CPUFAM_AMD64
-        mov     R_r8(r), [R_bp(r) + REGIX_R8*WORDSZ]
-        mov     R_r9(r), [R_bp(r) + REGIX_R9*WORDSZ]
-        mov     R_r10(r), [R_bp(r) + REGIX_R10*WORDSZ]
-        mov     R_r11(r), [R_bp(r) + REGIX_R11*WORDSZ]
-        mov     R_r12(r), [R_bp(r) + REGIX_R12*WORDSZ]
-        mov     R_r13(r), [R_bp(r) + REGIX_R13*WORDSZ]
-        mov     R_r14(r), [R_bp(r) + REGIX_R14*WORDSZ]
-        mov     R_r15(r), [R_bp(r) + REGIX_R15*WORDSZ]
+        mov     r8, [BP + REGIX_R8*WORDSZ]
+        mov     r9, [BP + REGIX_R9*WORDSZ]
+        mov     r10, [BP + REGIX_R10*WORDSZ]
+        mov     r11, [BP + REGIX_R11*WORDSZ]
+        mov     r12, [BP + REGIX_R12*WORDSZ]
+        mov     r13, [BP + REGIX_R13*WORDSZ]
+        mov     r14, [BP + REGIX_R14*WORDSZ]
+        mov     r15, [BP + REGIX_R15*WORDSZ]
 #endif
-        mov     R_bp(r), [R_bp(r) + REGIX_BP*WORDSZ]
+        mov     BP, [BP + REGIX_BP*WORDSZ]

         // Done.
         ret
@@ -175,11 +175,11 @@ FUNC(regdump_xtsave)
         // general registers are clobbered.

         // Start by filling in the easy parts of the map.
-        mov     [R_sp(r) + WORDSZ + regmap_gp], R_bp(r)
-        lea     R_bp(r), [R_sp(r) + WORDSZ]
+        mov     [SP + WORDSZ + regmap_gp], BP
+        lea     BP, [SP + WORDSZ]

         xor     eax, eax                // clears rax too on amd64
-        mov     [R_bp(r) + regmap_avx], R_a(r)
+        mov     [BP + regmap_avx], AX

         // Find out whether we use `xsave'.  (Preserve ebx.)
 #if CPUFAM_X86
@@ -191,40 +191,40 @@ FUNC(regdump_xtsave)
         je      5f

         // We have the `xsave' machinery.  Select the base address.
-        lea     R_si(r), [R_sp(r) + WORDSZ + regmap_size + 63]
-        and     R_si(r), ~63
-        mov     [R_bp(r) + regmap_fx], R_si(r)
+        lea     SI, [SP + WORDSZ + regmap_size + 63]
+        and     SI, ~63
+        mov     [BP + regmap_fx], SI

         // Clear out the header area.
         xor     eax, eax
-        lea     R_di(r), [R_si(r) + 512]
+        lea     DI, [SI + 512]
         mov     ecx, 16
         rep     stosd

         // Save the registers.
         mov     eax, 0x00000007
         xor     edx, edx
-        xsave   [R_si(r)]
+        xsave   [SI]

         // Establish the AVX pointer, if available.
-        test    dword ptr [R_si(r) + 512], 4 // = xstate_bv
+        test    dword ptr [SI + 512], 4 // = xstate_bv
         je      8f

         mov     eax, 13
         mov     ecx, 2
         cpuid
-        add     R_b(r), R_si(r)
-        mov     [R_bp(r) + regmap_avx], R_b(r)
+        add     BX, SI
+        mov     [BP + regmap_avx], BX

         jmp     8f

         // We have only `fxsave'.  Set the base address.
-5:      lea     R_si(r), [R_sp(r) + WORDSZ + regmap_size + 15]
-        and     R_si(r), ~15
-        mov     [R_bp(r) + regmap_fx], R_si(r)
+5:      lea     SI, [SP + WORDSZ + regmap_size + 15]
+        and     SI, ~15
+        mov     [BP + regmap_fx], SI

         // Save the registers.
-        fxsave  [R_si(r)]
+        fxsave  [SI]

         // Clear the x87 state; otherwise it can cause trouble later.
 8:      fninit
@@ -245,7 +245,7 @@ FUNC(regdump_xtrstr)
         // 32-bit x86, and the other general registers are clobbered.

         // Find the extended register dump.
-        mov     R_si(r), [R_bp(r) + regmap_fx]
+        mov     SI, [BP + regmap_fx]

         // Probe to find out whether we have `xsave'.
 #if CPUFAM_X86
@@ -259,14 +259,14 @@ FUNC(regdump_xtrstr)
         // We have the `xsave' machinery.
         mov     eax, 0x00000007
         xor     edx, edx
-        xrstor  [R_si(r)]
+        xrstor  [SI]
         jmp     8f

         // We must fake it up.
-1:      fxrstor [R_si(r)]
+1:      fxrstor [SI]

         // Done.
-8:      mov     R_bp(r), [R_bp(r) + regmap_gp]
+8:      mov     BP, [BP + regmap_gp]
 #if CPUFAM_X86
         pop     ebx
 #endif
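For readers following the assembler, here is a minimal C-level sketch of the extended-state logic that the `regdump_xtsave'/`regdump_xtrstr' hunks above implement, written with GCC/Clang intrinsics rather than Catacomb's assembler macros (build with -mxsave -mfxsr).  The function and buffer names are invented for the example, and the OSXSAVE test stands in for the feature probe that falls outside the hunks shown; the constants it relies on (the XSAVE header at offset 512, XSTATE_BV bit 2 for AVX, CPUID leaf 13 sub-leaf 2 returning the AVX component's offset in EBX, and the 64- versus 16-byte alignment requirements) are architectural and match the instruction sequences above.

  #include <stdint.h>
  #include <string.h>
  #include <cpuid.h>
  #include <immintrin.h>

  /* `xsave' needs a 64-byte-aligned area and `fxsave' only 16; 1024 bytes
   * covers the legacy region (512), the XSAVE header (64), and the AVX
   * component at its standard-format offset (576) plus its 256 bytes. */
  static unsigned char xtdump[1024] __attribute__((aligned(64)));

  static int have_xsave(void)
  {
    unsigned a, b, c, d;

    /* Stand-in for the probe elided from the hunks above: CPUID.1:ECX
     * bit 27 (OSXSAVE) reports that the OS has enabled `xsave'. */
    return __get_cpuid(1, &a, &b, &c, &d) && (c & (1u << 27));
  }

  /* Mirror of `regdump_xtsave': dump the extended state into `xtdump',
   * returning the dump base in *fx and the AVX component (or null) in
   * *avx. */
  void xtsave_sketch(unsigned char **fx, unsigned char **avx)
  {
    unsigned a, b, c, d;
    uint64_t xstate_bv;

    *fx = xtdump; *avx = 0;
    if (have_xsave()) {
      memset(xtdump + 512, 0, 64);      /* clear out the header area */
      _xsave(xtdump, 0x7);              /* x87 + SSE + AVX components */
      memcpy(&xstate_bv, xtdump + 512, 8);
      if (xstate_bv & 4) {              /* was AVX state actually written? */
        __cpuid_count(13, 2, a, b, c, d); /* only EBX (the offset) is needed */
        (void)a; (void)c; (void)d;
        *avx = xtdump + b;
      }
    } else
      _fxsave(xtdump);                  /* legacy x87/SSE image only */
  }

  /* Mirror of `regdump_xtrstr': put the extended state back. */
  void xtrstr_sketch(void)
  {
    if (have_xsave()) _xrstor(xtdump, 0x7);
    else _fxrstor(xtdump);
  }

The assembler version additionally runs `fninit' once the state has been dumped, since live x87 state left behind can upset later floating-point code (the `Clear the x87 state' comment above); the sketch leaves that step out.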