Diffstat (limited to 'tcg/ppc/tcg-target.inc.c')
-rw-r--r--  tcg/ppc/tcg-target.inc.c | 14 +++++++++-----
1 file changed, 9 insertions(+), 5 deletions(-)
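
The hunks below switch the alignment masking in the PPC tcg_out_tlb_read() TLB check from the access-size bits (s_bits) to the alignment bits returned by get_alignment_bits(opc), falling back to s_bits in the 32-bit branch where unaligned accesses are not supported. The following sketch is not QEMU code; the page size, addresses and helper name are illustrative assumptions. It only models the effect of the emitted rlwinm: the low a_bits of the address are kept when masking, so a misaligned address can never match the page-aligned TLB tag and the access falls through to the slow path.

#include <stdint.h>
#include <stdio.h>

/* Illustrative value, not taken from a QEMU build: a 4 KiB target page. */
#define TARGET_PAGE_BITS 12

/* Models the mask produced by the emitted rlwinm: keep the page-number
 * bits plus the low a_bits bits and clear the rest of the in-page offset.
 * An address with any of its low a_bits set then cannot equal the
 * page-aligned TLB tag, which forces a comparison failure. */
static uint32_t tlb_compare_value(uint32_t addr, int a_bits)
{
    uint32_t keep = (~0u << TARGET_PAGE_BITS) | ((1u << a_bits) - 1);
    return addr & keep;
}

int main(void)
{
    uint32_t tag       = 0x12345000;  /* page-aligned TLB tag */
    uint32_t aligned   = 0x12345008;  /* 4-byte aligned access */
    uint32_t unaligned = 0x12345006;  /* misaligned 4-byte access */

    /* a_bits = 2: the front-end requires 4-byte alignment. */
    printf("aligned:   %s\n",
           tlb_compare_value(aligned, 2) == tag ? "match" : "mismatch");
    printf("unaligned: %s\n",
           tlb_compare_value(unaligned, 2) == tag ? "match" : "mismatch");
    return 0;
}

With a_bits taken from the memory op rather than hard-wired to the access size, the required alignment can differ from s_bits; the added "if (a_bits < 0) { a_bits = s_bits; }" preserves the previous masking on the 32-bit path, where unaligned accesses are not handled.
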
diff --git a/tcg/ppc/tcg-target.inc.c b/tcg/ppc/tcg-target.inc.c
index dba954cc2a..eaf1bd9bfd 100644
--- a/tcg/ppc/tcg-target.inc.c
+++ b/tcg/ppc/tcg-target.inc.c
@@ -1405,6 +1405,7 @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGMemOp opc,
     int add_off = offsetof(CPUArchState, tlb_table[mem_index][0].addend);
     TCGReg base = TCG_AREG0;
     TCGMemOp s_bits = opc & MO_SIZE;
+    int a_bits = get_alignment_bits(opc);
 
     /* Extract the page index, shifted into place for tlb index. */
     if (TCG_TARGET_REG_BITS == 64) {
@@ -1462,14 +1463,17 @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGMemOp opc,
          * the bottom bits and thus trigger a comparison failure on
          * unaligned accesses
          */
+        if (a_bits < 0) {
+            a_bits = s_bits;
+        }
         tcg_out_rlw(s, RLWINM, TCG_REG_R0, addrlo, 0,
-                    (32 - s_bits) & 31, 31 - TARGET_PAGE_BITS);
-    } else if (s_bits) {
-        /* > byte access, we need to handle alignment */
-        if ((opc & MO_AMASK) == MO_ALIGN) {
+                    (32 - a_bits) & 31, 31 - TARGET_PAGE_BITS);
+    } else if (a_bits) {
+        /* More than byte access, we need to handle alignment */
+        if (a_bits > 0) {
             /* Alignment required by the front-end, same as 32-bits */
             tcg_out_rld(s, RLDICL, TCG_REG_R0, addrlo,
-                        64 - TARGET_PAGE_BITS, TARGET_PAGE_BITS - s_bits);
+                        64 - TARGET_PAGE_BITS, TARGET_PAGE_BITS - a_bits);
             tcg_out_rld(s, RLDICL, TCG_REG_R0, TCG_REG_R0, TARGET_PAGE_BITS, 0);
         } else {
             /* We support unaligned accesses, we need to make sure we fail